/drivers/media/usb/uvc/
  uvc_queue.c
      41  uvc_queue_to_stream(struct uvc_video_queue *queue)  in uvc_queue_to_stream() argument
      43  return container_of(queue, struct uvc_streaming, queue);  in uvc_queue_to_stream()
      51  static void uvc_queue_return_buffers(struct uvc_video_queue *queue,  in uvc_queue_return_buffers() argument
      58  while (!list_empty(&queue->irqqueue)) {  in uvc_queue_return_buffers()
      59  struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,  in uvc_queue_return_buffers()
      61  queue);  in uvc_queue_return_buffers()
      62  list_del(&buf->queue);  in uvc_queue_return_buffers()
      77  struct uvc_video_queue *queue = vb2_get_drv_priv(vq);  in uvc_queue_setup() local
      78  struct uvc_streaming *stream = uvc_queue_to_stream(queue);  in uvc_queue_setup()
      95  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_prepare() local
      [all …]
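The first two hits show the container_of() idiom: struct uvc_streaming embeds its struct uvc_video_queue, and uvc_queue_to_stream() recovers the parent structure from a pointer to the embedded member. A minimal, userspace-compilable sketch of the same idiom; the types below are hypothetical stand-ins, not the real UVC definitions:

    #include <stddef.h>

    /* Simplified version of the kernel's container_of(): subtract the
     * member's offset within the parent from the member's address. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct video_queue { unsigned int buf_used; };

    struct streaming {              /* stand-in for uvc_streaming */
        int id;
        struct video_queue queue;   /* embedded queue member */
    };

    static struct streaming *queue_to_stream(struct video_queue *q)
    {
        return container_of(q, struct streaming, queue);
    }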
/drivers/usb/gadget/function/
  uvc_queue.c
      48  struct uvc_video_queue *queue = vb2_get_drv_priv(vq);  in uvc_queue_setup() local
      49  struct uvc_video *video = container_of(queue, struct uvc_video, queue);  in uvc_queue_setup()
      63  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_prepare() local
      73  if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))  in uvc_buffer_prepare()
      89  struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);  in uvc_buffer_queue() local
      94  spin_lock_irqsave(&queue->irqlock, flags);  in uvc_buffer_queue()
      96  if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {  in uvc_buffer_queue()
      97  list_add_tail(&buf->queue, &queue->irqqueue);  in uvc_buffer_queue()
      106  spin_unlock_irqrestore(&queue->irqlock, flags);  in uvc_buffer_queue()
      117  int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,  in uvcg_queue_init() argument
      [all …]
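The uvc_buffer_queue() hits show a guarded enqueue: the buffer joins the IRQ-side list only while the queue lock is held, and only if the device has not been disconnected. A rough userspace analogue, substituting a pthread mutex for spin_lock_irqsave() and a hand-rolled tail pointer for the kernel's list_add_tail(); every name here is a stand-in:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define QUEUE_DISCONNECTED 0x1   /* mirrors UVC_QUEUE_DISCONNECTED */

    struct buffer { struct buffer *next; };

    struct buf_queue {
        pthread_mutex_t lock;        /* init with PTHREAD_MUTEX_INITIALIZER */
        unsigned int flags;
        struct buffer *head, *tail;  /* stands in for the irqqueue list */
    };

    /* Returns false if the device is gone and the buffer was refused. */
    static bool buffer_queue(struct buf_queue *q, struct buffer *buf)
    {
        bool queued = false;

        pthread_mutex_lock(&q->lock);
        if (!(q->flags & QUEUE_DISCONNECTED)) {
            buf->next = NULL;
            if (q->tail)
                q->tail->next = buf;
            else
                q->head = buf;
            q->tail = buf;
            queued = true;
        }
        pthread_mutex_unlock(&q->lock);
        return queued;
    }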
  uvc_queue.h
      30  struct list_head queue;  member
      43  struct vb2_queue queue;  member
      54  static inline int uvc_queue_streaming(struct uvc_video_queue *queue)  in uvc_queue_streaming() argument
      56  return vb2_is_streaming(&queue->queue);  in uvc_queue_streaming()
      59  int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
      62  void uvcg_free_buffers(struct uvc_video_queue *queue);
      64  int uvcg_alloc_buffers(struct uvc_video_queue *queue,
      67  int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
      69  int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
      71  int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
      [all …]
  uvc_video.c
      37  if (buf->bytesused - video->queue.buf_used <= len - 2)  in uvc_video_encode_header()
      47  struct uvc_video_queue *queue = &video->queue;  in uvc_video_encode_data() local
      52  mem = buf->mem + queue->buf_used;  in uvc_video_encode_data()
      53  nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);  in uvc_video_encode_data()
      56  queue->buf_used += nbytes;  in uvc_video_encode_data()
      87  if (buf->bytesused == video->queue.buf_used) {  in uvc_video_encode_bulk()
      88  video->queue.buf_used = 0;  in uvc_video_encode_bulk()
      90  uvcg_queue_next_buffer(&video->queue, buf);  in uvc_video_encode_bulk()
      97  buf->bytesused == video->queue.buf_used)  in uvc_video_encode_bulk()
      120  if (buf->bytesused == video->queue.buf_used) {  in uvc_video_encode_isoc()
      [all …]
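uvc_video_encode_data() feeds one video buffer out across many USB payloads: queue.buf_used records how far into the buffer the stream has advanced, and once it equals bytesused the buffer is complete and the next one is fetched. A simplified sketch of the copy step, using stand-in types rather than the gadget structures:

    #include <string.h>

    struct vbuf { unsigned char *mem; unsigned int bytesused; };
    struct vq   { unsigned int buf_used; };   /* progress marker */

    /* Copy up to len bytes of what remains in buf; returns bytes copied. */
    static unsigned int encode_data(struct vq *q, const struct vbuf *buf,
                                    unsigned char *dst, unsigned int len)
    {
        unsigned int nbytes = buf->bytesused - q->buf_used;

        if (nbytes > len)
            nbytes = len;                     /* clamp to payload size */
        memcpy(dst, buf->mem + q->buf_used, nbytes);
        q->buf_used += nbytes;                /* advance within the buffer */
        return nbytes;
    }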
/drivers/net/wireless/cw1200/
  queue.c
      29  static inline void __cw1200_queue_lock(struct cw1200_queue *queue)  in __cw1200_queue_lock() argument
      31  struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_lock()
      32  if (queue->tx_locked_cnt++ == 0) {  in __cw1200_queue_lock()
      34  queue->queue_id);  in __cw1200_queue_lock()
      35  ieee80211_stop_queue(stats->priv->hw, queue->queue_id);  in __cw1200_queue_lock()
      39  static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)  in __cw1200_queue_unlock() argument
      41  struct cw1200_queue_stats *stats = queue->stats;  in __cw1200_queue_unlock()
      42  BUG_ON(!queue->tx_locked_cnt);  in __cw1200_queue_unlock()
      43  if (--queue->tx_locked_cnt == 0) {  in __cw1200_queue_unlock()
      45  queue->queue_id);  in __cw1200_queue_unlock()
      [all …]
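__cw1200_queue_lock() and __cw1200_queue_unlock() form a counted lock: the mac80211 queue is stopped only when the count goes from 0 to 1 and woken only when it falls back to 0, so nested lock/unlock pairs stay cheap and balanced. A minimal sketch, with assert() and empty stubs standing in for BUG_ON() and the ieee80211_stop_queue()/ieee80211_wake_queue() calls:

    #include <assert.h>

    static void stop_hw_queue(void) { /* ieee80211_stop_queue() stand-in */ }
    static void wake_hw_queue(void) { /* ieee80211_wake_queue() stand-in */ }

    struct tx_queue { int tx_locked_cnt; };

    static void queue_lock(struct tx_queue *q)
    {
        if (q->tx_locked_cnt++ == 0)
            stop_hw_queue();          /* first holder halts transmission */
    }

    static void queue_unlock(struct tx_queue *q)
    {
        assert(q->tx_locked_cnt);     /* unbalanced unlock would be a bug */
        if (--q->tx_locked_cnt == 0)
            wake_hw_queue();          /* last holder resumes it */
    }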
/drivers/net/xen-netback/
  netback.c
      94  static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
      97  static void make_tx_response(struct xenvif_queue *queue,
      100  static void push_tx_responses(struct xenvif_queue *queue);
      102  static inline int tx_work_todo(struct xenvif_queue *queue);
      104  static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
      111  static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,  in idx_to_pfn() argument
      114  return page_to_pfn(queue->mmap_pages[idx]);  in idx_to_pfn()
      117  static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,  in idx_to_kaddr() argument
      120  return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));  in idx_to_kaddr()
      161  static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)  in xenvif_rx_ring_slots_available() argument
      [all …]
  interface.c
      54  void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,  in xenvif_skb_zerocopy_prepare() argument
      58  atomic_inc(&queue->inflight_packets);  in xenvif_skb_zerocopy_prepare()
      61  void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)  in xenvif_skb_zerocopy_complete() argument
      63  atomic_dec(&queue->inflight_packets);  in xenvif_skb_zerocopy_complete()
      69  wake_up(&queue->dealloc_wq);  in xenvif_skb_zerocopy_complete()
      79  static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)  in xenvif_handle_tx_interrupt() argument
      83  rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);  in xenvif_handle_tx_interrupt()
      85  napi_schedule(&queue->napi);  in xenvif_handle_tx_interrupt()
      91  struct xenvif_queue *queue = dev_id;  in xenvif_tx_interrupt() local
      94  old = xenvif_atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);  in xenvif_tx_interrupt()
      [all …]
/drivers/net/wireless/b43legacy/
  pio.c
      35  static void tx_start(struct b43legacy_pioqueue *queue)  in tx_start() argument
      37  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_start()
      41  static void tx_octet(struct b43legacy_pioqueue *queue,  in tx_octet() argument
      44  if (queue->need_workarounds) {  in tx_octet()
      45  b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
      46  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
      49  b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
      51  b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
      76  static void tx_data(struct b43legacy_pioqueue *queue,  in tx_data() argument
      84  if (queue->need_workarounds) {  in tx_data()
      [all …]
/drivers/misc/genwqe/
  card_ddcb.c
      91  static int queue_empty(struct ddcb_queue *queue)  in queue_empty() argument
      93  return queue->ddcb_next == queue->ddcb_act;  in queue_empty()
      96  static int queue_enqueued_ddcbs(struct ddcb_queue *queue)  in queue_enqueued_ddcbs() argument
      98  if (queue->ddcb_next >= queue->ddcb_act)  in queue_enqueued_ddcbs()
      99  return queue->ddcb_next - queue->ddcb_act;  in queue_enqueued_ddcbs()
      101  return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);  in queue_enqueued_ddcbs()
      104  static int queue_free_ddcbs(struct ddcb_queue *queue)  in queue_free_ddcbs() argument
      106  int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;  in queue_free_ddcbs()
      172  static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)  in print_ddcb_info() argument
      183  cd->card_idx, queue->ddcb_act, queue->ddcb_next);  in print_ddcb_info()
      [all …]
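queue_enqueued_ddcbs() and queue_free_ddcbs() are textbook circular-buffer accounting: occupancy is producer minus consumer, adjusted for wrap-around, and one slot is sacrificed so that next == act can only mean empty. The same arithmetic in isolation; field names follow the snippets, the struct itself is a simplified stand-in:

    struct ring {
        int next;   /* producer index */
        int act;    /* consumer index */
        int max;    /* total slots   */
    };

    static int ring_enqueued(const struct ring *r)
    {
        if (r->next >= r->act)
            return r->next - r->act;
        return r->max - (r->act - r->next);    /* producer wrapped around */
    }

    static int ring_free(const struct ring *r)
    {
        return r->max - ring_enqueued(r) - 1;  /* one slot kept free */
    }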
  card_debugfs.c
      236  struct ddcb_queue *queue;  in genwqe_ddcb_info_show() local
      239  queue = &cd->queue;  in genwqe_ddcb_info_show()
      250  queue->ddcb_max, (long long)queue->ddcb_daddr,  in genwqe_ddcb_info_show()
      251  (long long)queue->ddcb_daddr +  in genwqe_ddcb_info_show()
      252  (queue->ddcb_max * DDCB_LENGTH),  in genwqe_ddcb_info_show()
      253  (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight,  in genwqe_ddcb_info_show()
      254  queue->ddcbs_max_in_flight, queue->ddcbs_completed,  in genwqe_ddcb_info_show()
      255  queue->return_on_busy, queue->wait_on_busy,  in genwqe_ddcb_info_show()
      268  queue->IO_QUEUE_CONFIG,  in genwqe_ddcb_info_show()
      269  __genwqe_readq(cd, queue->IO_QUEUE_CONFIG),  in genwqe_ddcb_info_show()
      [all …]
/drivers/net/
  xen-netfront.c
      200  static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,  in xennet_get_rx_skb() argument
      204  struct sk_buff *skb = queue->rx_skbs[i];  in xennet_get_rx_skb()
      205  queue->rx_skbs[i] = NULL;  in xennet_get_rx_skb()
      209  static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,  in xennet_get_rx_ref() argument
      213  grant_ref_t ref = queue->grant_rx_ref[i];  in xennet_get_rx_ref()
      214  queue->grant_rx_ref[i] = GRANT_INVALID_REF;  in xennet_get_rx_ref()
      230  struct netfront_queue *queue = (struct netfront_queue *)data;  in rx_refill_timeout() local
      231  napi_schedule(&queue->napi);  in rx_refill_timeout()
      234  static int netfront_tx_slot_available(struct netfront_queue *queue)  in netfront_tx_slot_available() argument
      236  return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <  in netfront_tx_slot_available()
      [all …]
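netfront_tx_slot_available() relies on free-running unsigned ring indices: req_prod_pvt - rsp_cons is the number of requests still outstanding even after the counters wrap. A sketch of that test; the struct, the 32-bit counter width, and the TX_SLOTS_NEEDED headroom value are assumptions, not the Xen ring definitions:

    #include <stdint.h>
    #include <stdbool.h>

    #define TX_SLOTS_NEEDED 2   /* hypothetical per-packet headroom */

    struct tx_ring {
        uint32_t req_prod_pvt;  /* requests produced (not yet pushed) */
        uint32_t rsp_cons;      /* responses consumed */
        uint32_t size;          /* ring capacity */
    };

    static bool tx_slot_available(const struct tx_ring *tx)
    {
        /* unsigned subtraction handles index wrap-around naturally */
        return (tx->req_prod_pvt - tx->rsp_cons) <
               (tx->size - TX_SLOTS_NEEDED);
    }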
  eql.c
      140  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
      147  spin_lock(&eql->queue.lock);  in eql_timer()
      148  head = &eql->queue.all_slaves;  in eql_timer()
      157  eql_kill_one_slave(&eql->queue, slave);  in eql_timer()
      161  spin_unlock(&eql->queue.lock);  in eql_timer()
      186  spin_lock_init(&eql->queue.lock);  in eql_setup()
      187  INIT_LIST_HEAD(&eql->queue.all_slaves);  in eql_setup()
      188  eql->queue.master_dev = dev;  in eql_setup()
      213  BUG_ON(!list_empty(&eql->queue.all_slaves));  in eql_open()
      223  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)  in eql_kill_one_slave() argument
      [all …]
/drivers/staging/rdma/ehca/
  ipz_pt_fn.c
      51  void *ipz_qpageit_get_inc(struct ipz_queue *queue)  in ipz_qpageit_get_inc() argument
      53  void *ret = ipz_qeit_get(queue);  in ipz_qpageit_get_inc()
      54  queue->current_q_offset += queue->pagesize;  in ipz_qpageit_get_inc()
      55  if (queue->current_q_offset > queue->queue_length) {  in ipz_qpageit_get_inc()
      56  queue->current_q_offset -= queue->pagesize;  in ipz_qpageit_get_inc()
      59  if (((u64)ret) % queue->pagesize) {  in ipz_qpageit_get_inc()
      66  void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)  in ipz_qeit_eq_get_inc() argument
      68  void *ret = ipz_qeit_get(queue);  in ipz_qeit_eq_get_inc()
      69  u64 last_entry_in_q = queue->queue_length - queue->qe_size;  in ipz_qeit_eq_get_inc()
      71  queue->current_q_offset += queue->qe_size;  in ipz_qeit_eq_get_inc()
      [all …]
  ipz_pt_fn.h
      93  static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)  in ipz_qeit_calc() argument
      96  if (q_offset >= queue->queue_length)  in ipz_qeit_calc()
      98  current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];  in ipz_qeit_calc()
      106  static inline void *ipz_qeit_get(struct ipz_queue *queue)  in ipz_qeit_get() argument
      108  return ipz_qeit_calc(queue, queue->current_q_offset);  in ipz_qeit_get()
      118  void *ipz_qpageit_get_inc(struct ipz_queue *queue);
      126  static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)  in ipz_qeit_get_inc() argument
      128  void *ret = ipz_qeit_get(queue);  in ipz_qeit_get_inc()
      129  queue->current_q_offset += queue->qe_size;  in ipz_qeit_get_inc()
      130  if (queue->current_q_offset >= queue->queue_length) {  in ipz_qeit_get_inc()
      [all …]
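ipz_qeit_calc() resolves a logical byte offset inside a queue built from scattered pages: the high bits of the offset index a page-pointer array and the low bits address within the page. A compilable sketch under the assumption of 4 KiB pages; PAGE_SHIFT and the struct are simplified stand-ins for the EHCA definitions:

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12                 /* assumed page size: 4 KiB */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct paged_queue {
        void   **pages;                   /* array of page pointers */
        uint64_t queue_length;            /* total size in bytes */
    };

    static void *qeit_calc(struct paged_queue *q, uint64_t offset)
    {
        uint8_t *page;

        if (offset >= q->queue_length)
            return NULL;                          /* out of range */
        page = q->pages[offset >> PAGE_SHIFT];    /* which page */
        return page + (offset & (PAGE_SIZE - 1)); /* where within it */
    }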
/drivers/net/wireless/rt2x00/
  rt2x00queue.c
      36  struct data_queue *queue = entry->queue;  in rt2x00queue_alloc_rxskb() local
      37  struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;  in rt2x00queue_alloc_rxskb()
      48  frame_size = queue->data_size + queue->desc_size + queue->winfo_size;  in rt2x00queue_alloc_rxskb()
      107  struct device *dev = entry->queue->rt2x00dev->dev;  in rt2x00queue_map_txskb()
      123  struct device *dev = entry->queue->rt2x00dev->dev;  in rt2x00queue_unmap_skb()
      498  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00queue_write_tx_data()
      510  entry->queue->qid, DRV_PROJECT);  in rt2x00queue_write_tx_data()
      539  struct data_queue *queue = entry->queue;  in rt2x00queue_write_tx_descriptor() local
      541  queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);  in rt2x00queue_write_tx_descriptor()
      547  rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);  in rt2x00queue_write_tx_descriptor()
      [all …]
  rt2x00usb.c
      238  struct data_queue *queue;  in rt2x00usb_work_txdone() local
      241  tx_queue_for_each(rt2x00dev, queue) {  in rt2x00usb_work_txdone()
      242  while (!rt2x00queue_empty(queue)) {  in rt2x00usb_work_txdone()
      243  entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);  in rt2x00usb_work_txdone()
      257  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00usb_interrupt_txdone()
      284  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00usb_kick_tx_entry()
      312  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),  in rt2x00usb_kick_tx_entry()
      350  skbdesc->desc_len = entry->queue->desc_size;  in rt2x00usb_work_rxdone()
      362  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00usb_interrupt_rxdone()
      377  if (urb->actual_length < entry->queue->desc_size || urb->status)  in rt2x00usb_interrupt_rxdone()
      [all …]
  rt2x00mmio.c
      62  struct data_queue *queue = rt2x00dev->rx;  in rt2x00mmio_rxdone() local
      69  entry = rt2x00queue_get_entry(queue, Q_INDEX);  in rt2x00mmio_rxdone()
      80  skbdesc->desc_len = entry->queue->desc_size;  in rt2x00mmio_rxdone()
      99  void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)  in rt2x00mmio_flush_queue() argument
      103  for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)  in rt2x00mmio_flush_queue()
      112  struct data_queue *queue)  in rt2x00mmio_alloc_queue_dma() argument
      123  queue->limit * queue->desc_size, &dma,  in rt2x00mmio_alloc_queue_dma()
      131  for (i = 0; i < queue->limit; i++) {  in rt2x00mmio_alloc_queue_dma()
      132  entry_priv = queue->entries[i].priv_data;  in rt2x00mmio_alloc_queue_dma()
      133  entry_priv->desc = addr + i * queue->desc_size;  in rt2x00mmio_alloc_queue_dma()
      [all …]
/drivers/gpu/drm/vmwgfx/
  vmwgfx_marker.c
      37  void vmw_marker_queue_init(struct vmw_marker_queue *queue)  in vmw_marker_queue_init() argument
      39  INIT_LIST_HEAD(&queue->head);  in vmw_marker_queue_init()
      40  queue->lag = 0;  in vmw_marker_queue_init()
      41  queue->lag_time = ktime_get_raw_ns();  in vmw_marker_queue_init()
      42  spin_lock_init(&queue->lock);  in vmw_marker_queue_init()
      45  void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)  in vmw_marker_queue_takedown() argument
      49  spin_lock(&queue->lock);  in vmw_marker_queue_takedown()
      50  list_for_each_entry_safe(marker, next, &queue->head, head) {  in vmw_marker_queue_takedown()
      53  spin_unlock(&queue->lock);  in vmw_marker_queue_takedown()
      56  int vmw_marker_push(struct vmw_marker_queue *queue,  in vmw_marker_push() argument
      [all …]
/drivers/scsi/arm/
  queue.c
      58  int queue_initialise (Queue_t *queue)  in queue_initialise() argument
      63  spin_lock_init(&queue->queue_lock);  in queue_initialise()
      64  INIT_LIST_HEAD(&queue->head);  in queue_initialise()
      65  INIT_LIST_HEAD(&queue->free);  in queue_initialise()
      73  queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL);  in queue_initialise()
      78  list_add(&q->list, &queue->free);  in queue_initialise()
      82  return queue->alloc != NULL;  in queue_initialise()
      90  void queue_free (Queue_t *queue)  in queue_free() argument
      92  if (!list_empty(&queue->head))  in queue_free()
      93  printk(KERN_WARNING "freeing non-empty queue %p\n", queue);  in queue_free()
      [all …]
  queue.h
      25  extern int queue_initialise (Queue_t *queue);
      32  extern void queue_free (Queue_t *queue);
      40  extern struct scsi_cmnd *queue_remove (Queue_t *queue);
      49  extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
      52  #define queue_add_cmd_ordered(queue,SCpnt) \  argument
      53  __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
      54  #define queue_add_cmd_tail(queue,SCpnt) \  argument
      55  __queue_add(queue,SCpnt,0)
      64  extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
      75  extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
      [all …]
/drivers/net/ethernet/ibm/ehea/
  ehea_qmr.h
      210  static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)  in hw_qeit_calc() argument
      214  if (q_offset >= queue->queue_length)  in hw_qeit_calc()
      215  q_offset -= queue->queue_length;  in hw_qeit_calc()
      216  current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];  in hw_qeit_calc()
      220  static inline void *hw_qeit_get(struct hw_queue *queue)  in hw_qeit_get() argument
      222  return hw_qeit_calc(queue, queue->current_q_offset);  in hw_qeit_get()
      225  static inline void hw_qeit_inc(struct hw_queue *queue)  in hw_qeit_inc() argument
      227  queue->current_q_offset += queue->qe_size;  in hw_qeit_inc()
      228  if (queue->current_q_offset >= queue->queue_length) {  in hw_qeit_inc()
      229  queue->current_q_offset = 0;  in hw_qeit_inc()
      [all …]
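hw_qeit_inc() shows the companion operation to the paged lookup above: advance the current offset by one entry and wrap to zero at the end, which is what makes the hardware queue circular. The same step in isolation, with a stand-in struct rather than the ehea definition:

    struct hwq {
        unsigned long current_q_offset;
        unsigned long qe_size;       /* bytes per queue entry */
        unsigned long queue_length;  /* total bytes */
    };

    static void qeit_inc(struct hwq *q)
    {
        q->current_q_offset += q->qe_size;
        if (q->current_q_offset >= q->queue_length)
            q->current_q_offset = 0;   /* wrap to the first entry */
    }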
/drivers/net/wireless/ath/ath5k/
  qcu.c
      63  ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)  in ath5k_hw_num_tx_pending() argument
      66  AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);  in ath5k_hw_num_tx_pending()
      69  if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)  in ath5k_hw_num_tx_pending()
      76  pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));  in ath5k_hw_num_tx_pending()
      82  if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))  in ath5k_hw_num_tx_pending()
      94  ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)  in ath5k_hw_release_tx_queue() argument
      96  if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))  in ath5k_hw_release_tx_queue()
      100  ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;  in ath5k_hw_release_tx_queue()
      102  AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);  in ath5k_hw_release_tx_queue()
      138  ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,  in ath5k_hw_get_tx_queueprops() argument
      [all …]
/drivers/soc/ti/
  knav_qmss_acc.c
      45  int range_base, queue;  in __knav_acc_notify() local
      50  for (queue = 0; queue < range->num_queues; queue++) {  in __knav_acc_notify()
      52  queue);  in __knav_acc_notify()
      56  range_base + queue);  in __knav_acc_notify()
      61  queue = acc->channel - range->acc_info.start_channel;  in __knav_acc_notify()
      62  inst = knav_range_offset_to_inst(kdev, range, queue);  in __knav_acc_notify()
      64  range_base + queue);  in __knav_acc_notify()
      104  int range_base, channel, queue = 0;  in knav_acc_int_handler() local
      115  for (queue = 0; queue < range->num_irqs; queue++)  in knav_acc_int_handler()
      116  if (range->irqs[queue].irq == irq)  in knav_acc_int_handler()
      [all …]
/drivers/gpu/drm/amd/amdkfd/
  kfd_kernel_queue.c
      124  if (init_queue(&kq->queue, prop) != 0)  in initialize()
      127  kq->queue->device = dev;  in initialize()
      128  kq->queue->process = kfd_get_process(current);  in initialize()
      130  retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,  in initialize()
      131  &kq->queue->mqd_mem_obj,  in initialize()
      132  &kq->queue->gart_mqd_addr,  in initialize()
      133  &kq->queue->properties);  in initialize()
      140  kq->queue->pipe = KFD_CIK_HIQ_PIPE;  in initialize()
      141  kq->queue->queue = KFD_CIK_HIQ_QUEUE;  in initialize()
      142  kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,  in initialize()
      [all …]
/drivers/net/ethernet/hisilicon/hns/
  hns_dsaf_rcb.c
      728  void hns_rcb_update_stats(struct hnae_queue *queue)  in hns_rcb_update_stats() argument
      731  container_of(queue, struct ring_pair_cb, q);  in hns_rcb_update_stats()
      737  hw_stats->rx_pkts += dsaf_read_dev(queue,  in hns_rcb_update_stats()
      739  dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);  in hns_rcb_update_stats()
      746  hw_stats->tx_pkts += dsaf_read_dev(queue,  in hns_rcb_update_stats()
      748  dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);  in hns_rcb_update_stats()
      761  void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)  in hns_rcb_get_stats() argument
      765  container_of(queue, struct ring_pair_cb, q);  in hns_rcb_get_stats()
      772  dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);  in hns_rcb_get_stats()
      774  regs_buff[4] = queue->tx_ring.stats.tx_pkts;  in hns_rcb_get_stats()
      [all …]