/drivers/net/ethernet/huawei/hinic/ |
D | hinic_rx.c |
    53  void hinic_rxq_clean_stats(struct hinic_rxq *rxq)    in hinic_rxq_clean_stats() argument
    55          struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;    in hinic_rxq_clean_stats()
    71  void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)    in hinic_rxq_get_stats() argument
    73          struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;    in hinic_rxq_get_stats()
    91  static void rxq_stats_init(struct hinic_rxq *rxq)    in rxq_stats_init() argument
    93          struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;    in rxq_stats_init()
    96          hinic_rxq_clean_stats(rxq);    in rxq_stats_init()
    99  static void rx_csum(struct hinic_rxq *rxq, u32 status,    in rx_csum() argument
   102          struct net_device *netdev = rxq->netdev;    in rx_csum()
   115          rxq->rxq_stats.csum_errors++;    in rx_csum()
[all …]
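The clean/get pair above follows a common per-queue stats shape: one helper zeroes the counters in place, the other copies a snapshot out for the caller. A minimal standalone sketch of that shape with hypothetical field names (the real driver additionally protects the counters with a u64_stats sync sequence, omitted here):

    #include <string.h>
    #include <stdio.h>

    /* Hypothetical stats block; the real struct carries more counters. */
    struct rxq_stats {
            unsigned long long pkts;
            unsigned long long bytes;
            unsigned long long csum_errors;
    };

    struct rxq {
            struct rxq_stats rxq_stats;
    };

    /* Reset all counters in place. */
    static void rxq_clean_stats(struct rxq *rxq)
    {
            memset(&rxq->rxq_stats, 0, sizeof(rxq->rxq_stats));
    }

    /* Copy a snapshot out for the caller. */
    static void rxq_get_stats(struct rxq *rxq, struct rxq_stats *stats)
    {
            *stats = rxq->rxq_stats;
    }

    int main(void)
    {
            struct rxq q = { .rxq_stats = { .pkts = 5, .csum_errors = 1 } };
            struct rxq_stats snap;

            rxq_get_stats(&q, &snap);
            rxq_clean_stats(&q);
            printf("snapshot: %llu pkts, %llu csum errors\n",
                   snap.pkts, snap.csum_errors);
            return 0;
    }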
|
D | hinic_rx.h |
    44  void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
    46  void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
    48  int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
    51  void hinic_clean_rxq(struct hinic_rxq *rxq);
|
/drivers/net/wireless/intel/iwlwifi/pcie/ |
D | rx.c |
   119  static int iwl_rxq_space(const struct iwl_rxq *rxq)    in iwl_rxq_space() argument
   122          WARN_ON(rxq->queue_size & (rxq->queue_size - 1));    in iwl_rxq_space()
   130          return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);    in iwl_rxq_space()
   167                                     struct iwl_rxq *rxq)    in iwl_pcie_rxq_inc_wr_ptr() argument
   171          lockdep_assert_held(&rxq->lock);    in iwl_pcie_rxq_inc_wr_ptr()
   187                  rxq->need_update = true;    in iwl_pcie_rxq_inc_wr_ptr()
   192          rxq->write_actual = round_down(rxq->write, 8);    in iwl_pcie_rxq_inc_wr_ptr()
   194                  iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),    in iwl_pcie_rxq_inc_wr_ptr()
   195                              rxq->write_actual);    in iwl_pcie_rxq_inc_wr_ptr()
   197                  iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);    in iwl_pcie_rxq_inc_wr_ptr()
[all …]
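iwl_rxq_space() depends on the queue size being a power of two: (read - write - 1) & (size - 1) yields the free-slot count while keeping one slot permanently unused, so a full ring never looks identical to an empty one. A userspace sketch of the same idiom, with illustrative names rather than the driver's:

    #include <assert.h>
    #include <stdio.h>

    struct ring {
            unsigned int read;       /* consumer index */
            unsigned int write;      /* producer index */
            unsigned int queue_size; /* must be a power of two */
    };

    /* Free slots, always leaving one empty so full != empty. */
    static unsigned int ring_space(const struct ring *r)
    {
            /* Power-of-two check, mirroring the WARN_ON() above. */
            assert((r->queue_size & (r->queue_size - 1)) == 0);
            return (r->read - r->write - 1) & (r->queue_size - 1);
    }

    int main(void)
    {
            struct ring r = { .read = 0, .write = 0, .queue_size = 256 };

            printf("empty ring: %u free slots\n", ring_space(&r));  /* 255 */
            r.write = 255;
            printf("full ring: %u free slots\n", ring_space(&r));   /* 0 */
            return 0;
    }

Unsigned wraparound makes the subtraction safe even after the indices overflow, which is why neither side is ever reduced modulo the ring size.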
|
/drivers/infiniband/hw/hfi1/ |
D | netdev_rx.c |
   194          rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),    in hfi1_netdev_rxq_init()
   197          if (!rx->rxq) {    in hfi1_netdev_rxq_init()
   203                  struct hfi1_netdev_rxq *rxq = &rx->rxq[i];    in hfi1_netdev_rxq_init() local
   205                  rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);    in hfi1_netdev_rxq_init()
   209                  hfi1_rcd_get(rxq->rcd);    in hfi1_netdev_rxq_init()
   210                  rxq->rx = rx;    in hfi1_netdev_rxq_init()
   211                  rxq->rcd->napi = &rxq->napi;    in hfi1_netdev_rxq_init()
   213                          i, rxq->rcd->ctxt);    in hfi1_netdev_rxq_init()
   218                  set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);    in hfi1_netdev_rxq_init()
   219                  netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);    in hfi1_netdev_rxq_init()
[all …]
|
D | vnic_main.c |
   292  static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,    in hfi1_vnic_decap_skb() argument
   295          struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;    in hfi1_vnic_decap_skb()
   303                  vinfo->stats[rxq->idx].rx_oversize++;    in hfi1_vnic_decap_skb()
   305                  vinfo->stats[rxq->idx].rx_runt++;    in hfi1_vnic_decap_skb()
   336          struct hfi1_vnic_rx_queue *rxq;    in hfi1_vnic_bypass_rcv() local
   370          rxq = &vinfo->rxq[q_idx];    in hfi1_vnic_bypass_rcv()
   389          rc = hfi1_vnic_decap_skb(rxq, skb);    in hfi1_vnic_bypass_rcv()
   392          hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);    in hfi1_vnic_bypass_rcv()
   399          skb->protocol = eth_type_trans(skb, rxq->netdev);    in hfi1_vnic_bypass_rcv()
   401          napi_gro_receive(&rxq->napi, skb);    in hfi1_vnic_bypass_rcv()
[all …]
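hfi1_vnic_decap_skb() buckets bad frame lengths into per-queue rx_oversize and rx_runt counters before the frame reaches the stack. A small sketch of that classification under assumed Ethernet bounds (the driver derives its actual limits from its own MTU configuration):

    #include <stdio.h>

    /* Assumed bounds for illustration; the driver computes its own. */
    #define RX_MIN_LEN 64
    #define RX_MAX_LEN 1518

    struct rx_stats { unsigned long rx_oversize, rx_runt, rx_ok; };

    /* Classify a frame by length; nonzero return means drop. */
    static int check_frame_len(struct rx_stats *stats, unsigned int len)
    {
            if (len > RX_MAX_LEN) {
                    stats->rx_oversize++;
                    return -1;
            }
            if (len < RX_MIN_LEN) {
                    stats->rx_runt++;
                    return -1;
            }
            stats->rx_ok++;
            return 0;
    }

    int main(void)
    {
            struct rx_stats stats = { 0 };

            check_frame_len(&stats, 2000);  /* oversize */
            check_frame_len(&stats, 20);    /* runt */
            check_frame_len(&stats, 512);   /* ok */
            printf("oversize=%lu runt=%lu ok=%lu\n",
                   stats.rx_oversize, stats.rx_runt, stats.rx_ok);
            return 0;
    }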
|
/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
    26  int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)    in qede_alloc_rx_buffer() argument
    37          if (allow_lazy && likely(rxq->filled_buffers > 12)) {    in qede_alloc_rx_buffer()
    38                  rxq->filled_buffers--;    in qede_alloc_rx_buffer()
    49          mapping = dma_map_page(rxq->dev, data, 0,    in qede_alloc_rx_buffer()
    50                                 PAGE_SIZE, rxq->data_direction);    in qede_alloc_rx_buffer()
    51          if (unlikely(dma_mapping_error(rxq->dev, mapping))) {    in qede_alloc_rx_buffer()
    56          sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];    in qede_alloc_rx_buffer()
    62          rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);    in qede_alloc_rx_buffer()
    66                                    rxq->rx_headroom);    in qede_alloc_rx_buffer()
    68          rxq->sw_rx_prod++;    in qede_alloc_rx_buffer()
[all …]
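qede_alloc_rx_buffer() has a fast path: when allow_lazy is set and more than 12 filled buffers remain, it skips the page allocation and DMA mapping entirely and just spends one of the queued buffers. A sketch of that lazy-refill decision with illustrative names; the slow path's actual allocation and mapping are elided:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative threshold; qede hardcodes 12. */
    #define REFILL_THRESHOLD 12

    struct rx_queue { unsigned int filled_buffers; };

    /* Return true if a real allocation was performed. */
    static bool alloc_rx_buffer(struct rx_queue *rxq, bool allow_lazy)
    {
            if (allow_lazy && rxq->filled_buffers > REFILL_THRESHOLD) {
                    /* Enough buffers already queued: consume a credit. */
                    rxq->filled_buffers--;
                    return false;
            }
            /* Slow path: allocate and DMA-map a new page (elided). */
            rxq->filled_buffers++;
            return true;
    }

    int main(void)
    {
            struct rx_queue q = { .filled_buffers = 20 };

            printf("allocated: %d\n", alloc_rx_buffer(&q, true)); /* 0: lazy */
            q.filled_buffers = 3;
            printf("allocated: %d\n", alloc_rx_buffer(&q, true)); /* 1: real */
            return 0;
    }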
|
D | qede_main.c |
   891          if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))    in qede_free_fp_array()
   892                  xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);    in qede_free_fp_array()
   893          kfree(fp->rxq);    in qede_free_fp_array()
   962          fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);    in qede_alloc_fp_array()
   963          if (!fp->rxq)    in qede_alloc_fp_array()
  1455                                  struct qede_rx_queue *rxq)    in qede_free_rx_buffers() argument
  1459          for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {    in qede_free_rx_buffers()
  1463                  rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];    in qede_free_rx_buffers()
  1467                                 rx_buf->mapping, PAGE_SIZE, rxq->data_direction);    in qede_free_rx_buffers()
  1474  static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)    in qede_free_mem_rxq() argument
[all …]
|
/drivers/net/ethernet/marvell/ |
D | mvneta.c |
   106  #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))    argument
   131  #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)    argument
   820                                       struct mvneta_rx_queue *rxq,    in mvneta_rxq_non_occup_desc_add() argument
   827                  mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),    in mvneta_rxq_non_occup_desc_add()
   833          mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),    in mvneta_rxq_non_occup_desc_add()
   839                                      struct mvneta_rx_queue *rxq)    in mvneta_rxq_busy_desc_num_get() argument
   843          val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));    in mvneta_rxq_busy_desc_num_get()
   851                                      struct mvneta_rx_queue *rxq,    in mvneta_rxq_desc_num_update() argument
   859          mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);    in mvneta_rxq_desc_num_update()
   879          mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);    in mvneta_rxq_desc_num_update()
[all …]
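MVNETA_VLAN_PRIO_RXQ_MAP packs a queue number into a register holding one 3-bit field per VLAN priority (8 priorities x 3 bits = 24 bits). A sketch of packing and unpacking such fields; the GET macro is an addition for illustration, not taken from the driver:

    #include <stdio.h>
    #include <stdint.h>

    /* One 3-bit rxq field per priority, as in MVNETA_VLAN_PRIO_RXQ_MAP. */
    #define PRIO_RXQ_MAP(prio, rxq)  ((uint32_t)(rxq) << ((prio) * 3))
    #define PRIO_RXQ_GET(map, prio)  (((map) >> ((prio) * 3)) & 0x7)

    int main(void)
    {
            uint32_t map = 0;
            int prio;

            /* Route priority 0 to queue 1, priority 5 to queue 7. */
            map |= PRIO_RXQ_MAP(0, 1);
            map |= PRIO_RXQ_MAP(5, 7);

            for (prio = 0; prio < 8; prio++)
                    printf("prio %d -> rxq %u\n", prio, PRIO_RXQ_GET(map, prio));
            return 0;
    }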
|
D | mv643xx_eth.c |
   398          struct rx_queue rxq[8];    member
   440  static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)    in rxq_to_mp() argument
   442          return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);    in rxq_to_mp()
   450  static void rxq_enable(struct rx_queue *rxq)    in rxq_enable() argument
   452          struct mv643xx_eth_private *mp = rxq_to_mp(rxq);    in rxq_enable()
   453          wrlp(mp, RXQ_COMMAND, 1 << rxq->index);    in rxq_enable()
   456  static void rxq_disable(struct rx_queue *rxq)    in rxq_disable() argument
   458          struct mv643xx_eth_private *mp = rxq_to_mp(rxq);    in rxq_disable()
   459          u8 mask = 1 << rxq->index;    in rxq_disable()
   505  static int rxq_process(struct rx_queue *rxq, int budget)    in rxq_process() argument
[all …]
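rxq_to_mp() is a neat container_of trick: because the queue lives in an embedded array, passing rxq[rxq->index] as the member designator makes container_of subtract the per-element offset as well, recovering the parent from any element. A userspace demonstration with the classic hand-rolled container_of (without the kernel's extra type checking); note the variable array index inside offsetof is a GCC/Clang extension the kernel itself relies on:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rx_queue { int index; };

    struct eth_private {
            int id;
            struct rx_queue rxq[8]; /* embedded array, as in mv643xx_eth */
    };

    /* Recover the parent from a pointer to one array element. */
    static struct eth_private *rxq_to_priv(struct rx_queue *rxq)
    {
            return container_of(rxq, struct eth_private, rxq[rxq->index]);
    }

    int main(void)
    {
            struct eth_private mp = { .id = 42 };
            int i;

            for (i = 0; i < 8; i++)
                    mp.rxq[i].index = i;

            printf("parent id via rxq[5]: %d\n", rxq_to_priv(&mp.rxq[5])->id);
            return 0;
    }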
|
/drivers/atm/ |
D | ambassador.c |
   673          amb_rxq * rxq = &dev->rxq[pool];    in rx_give() local
   678          spin_lock_irqsave (&rxq->lock, flags);    in rx_give()
   680          if (rxq->pending < rxq->maximum) {    in rx_give()
   681                  PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);    in rx_give()
   683                  *rxq->in.ptr = *rx;    in rx_give()
   684                  rxq->pending++;    in rx_give()
   685                  rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);    in rx_give()
   687                  wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));    in rx_give()
   689                  spin_unlock_irqrestore (&rxq->lock, flags);    in rx_give()
   692                  spin_unlock_irqrestore (&rxq->lock, flags);    in rx_give()
[all …]
|
/drivers/net/ethernet/microsoft/mana/ |
D | mana_en.c |
   889  static void mana_post_pkt_rxq(struct mana_rxq *rxq)    in mana_post_pkt_rxq() argument
   895          curr_index = rxq->buf_index++;    in mana_post_pkt_rxq()
   896          if (rxq->buf_index == rxq->num_rx_buf)    in mana_post_pkt_rxq()
   897                  rxq->buf_index = 0;    in mana_post_pkt_rxq()
   899          recv_buf_oob = &rxq->rx_oobs[curr_index];    in mana_post_pkt_rxq()
   901          err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,    in mana_post_pkt_rxq()
   910                      struct mana_rxq *rxq)    in mana_rx_skb() argument
   912          struct mana_stats *rx_stats = &rxq->stats;    in mana_rx_skb()
   913          struct net_device *ndev = rxq->ndev;    in mana_rx_skb()
   915          u16 rxq_idx = rxq->rxq_idx;    in mana_rx_skb()
[all …]
|
/drivers/vdpa/vdpa_sim/ |
D | vdpa_sim_net.c |
    42          struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];    in vdpasim_net_work() local
    53          if (!txq->ready || !rxq->ready)    in vdpasim_net_work()
    63          err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,    in vdpasim_net_work()
    64                                     &rxq->head, GFP_ATOMIC);    in vdpasim_net_work()
    77          write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,    in vdpasim_net_work()
    89          vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);    in vdpasim_net_work()
    97          if (vringh_need_notify_iotlb(&rxq->vring) > 0)    in vdpasim_net_work()
    98                  vringh_notify(&rxq->vring);    in vdpasim_net_work()
|
/drivers/net/ethernet/atheros/alx/ |
D | main.c |
    74          struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;    in alx_refill_rx_ring() local
    80          next = cur = rxq->write_idx;    in alx_refill_rx_ring()
    83          cur_buf = &rxq->bufs[cur];    in alx_refill_rx_ring()
    85          while (!cur_buf->skb && next != rxq->read_idx) {    in alx_refill_rx_ring()
    86                  struct alx_rfd *rfd = &rxq->rfd[cur];    in alx_refill_rx_ring()
   128                  cur_buf = &rxq->bufs[cur];    in alx_refill_rx_ring()
   135          rxq->write_idx = cur;    in alx_refill_rx_ring()
   217  static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)    in alx_clean_rx_irq() argument
   226          alx = netdev_priv(rxq->netdev);    in alx_clean_rx_irq()
   229          rrd = &rxq->rrd[rxq->rrd_read_idx];    in alx_clean_rx_irq()
[all …]
|
/drivers/net/ethernet/hisilicon/ |
D | hisi_femac.c |
   121          struct hisi_femac_queue rxq;    member
   212          struct hisi_femac_queue *rxq = &priv->rxq;    in hisi_femac_rx_refill() local
   218          pos = rxq->head;    in hisi_femac_rx_refill()
   220                  if (!CIRC_SPACE(pos, rxq->tail, rxq->num))    in hisi_femac_rx_refill()
   222                  if (unlikely(rxq->skb[pos])) {    in hisi_femac_rx_refill()
   224                                  pos, rxq->skb[pos]);    in hisi_femac_rx_refill()
   237                  rxq->dma_phys[pos] = addr;    in hisi_femac_rx_refill()
   238                  rxq->skb[pos] = skb;    in hisi_femac_rx_refill()
   240                  pos = (pos + 1) % rxq->num;    in hisi_femac_rx_refill()
   242          rxq->head = pos;    in hisi_femac_rx_refill()
[all …]
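The CIRC_SPACE() test above comes from include/linux/circ_buf.h and assumes a power-of-two ring size; it reports the free slots while reserving one, so head == tail always means empty. A self-contained copy of the two kernel macros with a small demonstration:

    #include <stdio.h>

    /* From include/linux/circ_buf.h; size must be a power of two. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
            unsigned int head = 0, tail = 0, size = 16;

            printf("empty: %u used, %u free\n",
                   CIRC_CNT(head, tail, size), CIRC_SPACE(head, tail, size));

            head = 15; /* producer filled 15 slots */
            printf("nearly full: %u used, %u free\n",
                   CIRC_CNT(head, tail, size), CIRC_SPACE(head, tail, size));
            return 0;
    }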
|
/drivers/net/wireless/intel/iwlegacy/ |
D | 3945-mac.c |
   929          struct il_rx_queue *rxq = &il->rxq;    in il3945_rx_queue_restock() local
   934          spin_lock_irqsave(&rxq->lock, flags);    in il3945_rx_queue_restock()
   935          while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {    in il3945_rx_queue_restock()
   937                  element = rxq->rx_free.next;    in il3945_rx_queue_restock()
   942                  rxq->bd[rxq->write] =    in il3945_rx_queue_restock()
   944                  rxq->queue[rxq->write] = rxb;    in il3945_rx_queue_restock()
   945                  rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;    in il3945_rx_queue_restock()
   946                  rxq->free_count--;    in il3945_rx_queue_restock()
   948          spin_unlock_irqrestore(&rxq->lock, flags);    in il3945_rx_queue_restock()
   951          if (rxq->free_count <= RX_LOW_WATERMARK)    in il3945_rx_queue_restock()
[all …]
|
D | 4965-mac.c |
    85  il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)    in il4965_rx_queue_reset() argument
    89          spin_lock_irqsave(&rxq->lock, flags);    in il4965_rx_queue_reset()
    90          INIT_LIST_HEAD(&rxq->rx_free);    in il4965_rx_queue_reset()
    91          INIT_LIST_HEAD(&rxq->rx_used);    in il4965_rx_queue_reset()
    96                  if (rxq->pool[i].page != NULL) {    in il4965_rx_queue_reset()
    98                                  rxq->pool[i].page_dma,    in il4965_rx_queue_reset()
   101                          __il_free_pages(il, rxq->pool[i].page);    in il4965_rx_queue_reset()
   102                          rxq->pool[i].page = NULL;    in il4965_rx_queue_reset()
   104                  list_add_tail(&rxq->pool[i].list, &rxq->rx_used);    in il4965_rx_queue_reset()
   108                  rxq->queue[i] = NULL;    in il4965_rx_queue_reset()
[all …]
|
/drivers/vhost/ |
D | net.c |
   126          struct vhost_net_buf rxq;    member
   151  static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)    in vhost_net_buf_get_ptr() argument
   153          if (rxq->tail != rxq->head)    in vhost_net_buf_get_ptr()
   154                  return rxq->queue[rxq->head];    in vhost_net_buf_get_ptr()
   159  static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)    in vhost_net_buf_get_size() argument
   161          return rxq->tail - rxq->head;    in vhost_net_buf_get_size()
   164  static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)    in vhost_net_buf_is_empty() argument
   166          return rxq->tail == rxq->head;    in vhost_net_buf_is_empty()
   169  static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)    in vhost_net_buf_consume() argument
   171          void *ret = vhost_net_buf_get_ptr(rxq);    in vhost_net_buf_consume()
[all …]
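vhost_net's rxq is a simple batch buffer: packet pointers are fetched in bulk into an array, then drained one at a time through head/tail cursors. A sketch of the consume-side helpers with the same shape (the kernel code also peeks without advancing; the bulk refill is elided here):

    #include <stdio.h>
    #include <stddef.h>

    #define BATCH_SIZE 64

    /* Batch of packet pointers filled in bulk, drained one at a time. */
    struct net_buf {
            void *queue[BATCH_SIZE];
            int head; /* next entry to consume */
            int tail; /* one past the last filled entry */
    };

    static void *net_buf_get_ptr(struct net_buf *rxq)
    {
            return (rxq->tail != rxq->head) ? rxq->queue[rxq->head] : NULL;
    }

    static int net_buf_is_empty(struct net_buf *rxq)
    {
            return rxq->tail == rxq->head;
    }

    /* Return the current entry and advance past it. */
    static void *net_buf_consume(struct net_buf *rxq)
    {
            void *ret = net_buf_get_ptr(rxq);

            if (ret)
                    rxq->head++;
            return ret;
    }

    int main(void)
    {
            int a = 1, b = 2;
            struct net_buf buf = { .queue = { &a, &b }, .head = 0, .tail = 2 };

            while (!net_buf_is_empty(&buf))
                    printf("consumed %d\n", *(int *)net_buf_consume(&buf));
            return 0;
    }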
|
/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | cxgb4vf_main.c |
   383          int rxq, msi, err;    in request_msix_queue_irqs() local
   397          for_each_ethrxq(s, rxq) {    in request_msix_queue_irqs()
   401                            &s->ethrxq[rxq].rspq);    in request_msix_queue_irqs()
   409          while (--rxq >= 0)    in request_msix_queue_irqs()
   410                  free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);    in request_msix_queue_irqs()
   421          int rxq, msi;    in free_msix_queue_irqs() local
   425          for_each_ethrxq(s, rxq)    in free_msix_queue_irqs()
   427                  &s->ethrxq[rxq].rspq);    in free_msix_queue_irqs()
   452          int rxq;    in enable_rx() local
   455          for_each_ethrxq(s, rxq)    in enable_rx()
[all …]
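request_msix_queue_irqs() uses the classic unwind-on-failure shape: on the first failed request, walk the loop counter backwards and release everything acquired so far. A generic sketch of that rollback pattern with stand-in acquire/release functions (the fail-on-queue-2 behavior is just for the demo):

    #include <stdio.h>

    #define NUM_QUEUES 4

    /* Stand-ins for request_irq()/free_irq(); fails on queue 2 for demo. */
    static int fake_request_irq(int q) { return (q == 2) ? -1 : 0; }
    static void fake_free_irq(int q)   { printf("freed irq for rxq %d\n", q); }

    static int request_queue_irqs(void)
    {
            int rxq, err;

            for (rxq = 0; rxq < NUM_QUEUES; rxq++) {
                    err = fake_request_irq(rxq);
                    if (err)
                            goto err_unwind;
            }
            return 0;

    err_unwind:
            /* Release only what was successfully acquired, in reverse. */
            while (--rxq >= 0)
                    fake_free_irq(rxq);
            return err;
    }

    int main(void)
    {
            printf("result: %d\n", request_queue_irqs());
            return 0;
    }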
|
D | sge.c |
  1567  static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,    in do_gro() argument
  1570          struct adapter *adapter = rxq->rspq.adapter;    in do_gro()
  1576          skb = napi_get_frags(&rxq->rspq.napi);    in do_gro()
  1579                  rxq->stats.rx_drops++;    in do_gro()
  1588          skb_record_rx_queue(skb, rxq->rspq.idx);    in do_gro()
  1594                  rxq->stats.vlan_ex++;    in do_gro()
  1596          ret = napi_gro_frags(&rxq->rspq.napi);    in do_gro()
  1599                  rxq->stats.lro_pkts++;    in do_gro()
  1601                  rxq->stats.lro_merged++;    in do_gro()
  1602          rxq->stats.pkts++;    in do_gro()
[all …]
|
/drivers/net/ethernet/alacritech/ |
D | slicoss.c |
   120  static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)    in slic_get_free_rx_descs() argument
   122          return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);    in slic_get_free_rx_descs()
   396          struct slic_rx_queue *rxq = &sdev->rxq;    in slic_refill_rx_queue() local
   405          while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {    in slic_refill_rx_queue()
   437                  buff = &rxq->rxbuffs[rxq->put_idx];    in slic_refill_rx_queue()
   446                  rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);    in slic_refill_rx_queue()
   549          struct slic_rx_queue *rxq = &sdev->rxq;    in slic_handle_receive() local
   559          while (todo && (rxq->done_idx != rxq->put_idx)) {    in slic_handle_receive()
   560                  buff = &rxq->rxbuffs[rxq->done_idx];    in slic_handle_receive()
   613                  rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);    in slic_handle_receive()
[all …]
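slic_refill_rx_queue() keeps producing buffers while the gap between put_idx (producer) and done_idx (consumer) leaves enough free descriptors. A sketch of one plausible free-count computation for such a put/done ring, reserving one slot so full and empty stay distinguishable; the driver's actual slic_get_free_queue_descs() may differ in its off-by-one details:

    #include <stdio.h>

    /* One plausible formulation; the driver's helper may differ slightly. */
    static unsigned int free_queue_descs(unsigned int put_idx,
                                         unsigned int done_idx,
                                         unsigned int len)
    {
            if (put_idx >= done_idx)
                    return len - (put_idx - done_idx) - 1;
            return done_idx - put_idx - 1;
    }

    /* Wraparound increment, as slic_next_queue_idx() does. */
    static unsigned int next_queue_idx(unsigned int idx, unsigned int len)
    {
            return (idx + 1) % len;
    }

    int main(void)
    {
            unsigned int put = 0, done = 0, len = 8;

            printf("empty: %u free\n", free_queue_descs(put, done, len));
            put = next_queue_idx(put, len); /* produce one buffer */
            printf("after one fill: %u free\n", free_queue_descs(put, done, len));
            return 0;
    }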
|
/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
D | ktls_rx.c |
    54          u32 rxq;    member
   344          c = resync->priv->channels.c[priv_rx->rxq];    in resync_handle_work()
   532          c = priv->channels.c[priv_rx->rxq];    in mlx5e_ktls_rx_resync()
   574          int rxq = sk_rx_queue_get(sk);    in mlx5e_ktls_sk_get_rxq() local
   576          if (unlikely(rxq == -1))    in mlx5e_ktls_sk_get_rxq()
   577                  rxq = 0;    in mlx5e_ktls_sk_get_rxq()
   579          return rxq;    in mlx5e_ktls_sk_get_rxq()
   591          int rxq, err;    in mlx5e_ktls_add_rx() local
   609          rxq = mlx5e_ktls_sk_get_rxq(sk);    in mlx5e_ktls_add_rx()
   610          priv_rx->rxq = rxq;    in mlx5e_ktls_add_rx()
[all …]
|
/drivers/net/ethernet/marvell/mvpp2/ |
D | mvpp2_main.c |
  1206          int rxq;    in mvpp2_swf_bm_pool_init_shared() local
  1229          for (rxq = 0; rxq < port->nrxqs; rxq++)    in mvpp2_swf_bm_pool_init_shared()
  1230                  mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);    in mvpp2_swf_bm_pool_init_shared()
  1242          for (rxq = 0; rxq < port->nrxqs; rxq++)    in mvpp2_swf_bm_pool_init_shared()
  1243                  mvpp2_rxq_short_pool_set(port, rxq,    in mvpp2_swf_bm_pool_init_shared()
  2427  mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)    in mvpp2_rxq_next_desc_get() argument
  2429          int rx_desc = rxq->next_desc_to_proc;    in mvpp2_rxq_next_desc_get()
  2431          rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);    in mvpp2_rxq_next_desc_get()
  2432          prefetch(rxq->descs + rxq->next_desc_to_proc);    in mvpp2_rxq_next_desc_get()
  2433          return rxq->descs + rx_desc;    in mvpp2_rxq_next_desc_get()
[all …]
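mvpp2_rxq_next_desc_get() returns the current descriptor but prefetches the one after it, hiding memory latency behind the processing of the current packet. A userspace sketch using the compiler builtin that the kernel's prefetch() maps to on most architectures; names and sizes are illustrative:

    #include <stdio.h>

    #define RING_SIZE 256

    struct rx_desc { unsigned int status; unsigned int len; };

    struct rx_queue {
            struct rx_desc descs[RING_SIZE];
            int next_desc_to_proc;
    };

    /* Hand out the current descriptor, warm the cache for the next one. */
    static struct rx_desc *rxq_next_desc_get(struct rx_queue *rxq)
    {
            int rx_desc = rxq->next_desc_to_proc;

            rxq->next_desc_to_proc = (rx_desc + 1) % RING_SIZE;
            __builtin_prefetch(&rxq->descs[rxq->next_desc_to_proc]);
            return &rxq->descs[rx_desc];
    }

    int main(void)
    {
            struct rx_queue q = { .next_desc_to_proc = 0 };
            struct rx_desc *d = rxq_next_desc_get(&q);

            printf("processing descriptor %ld\n", (long)(d - q.descs));
            return 0;
    }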
|
/drivers/net/ethernet/chelsio/cxgb4/ |
D | cudbg_lib.h |
   253  static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,    in cudbg_fill_qdesc_rxq() argument
   258          entry->qid = rxq->cntxt_id;    in cudbg_fill_qdesc_rxq()
   259          entry->desc_size = rxq->iqe_len;    in cudbg_fill_qdesc_rxq()
   260          entry->num_desc = rxq->size;    in cudbg_fill_qdesc_rxq()
   261          entry->data_size = rxq->size * rxq->iqe_len;    in cudbg_fill_qdesc_rxq()
   262          memcpy(entry->data, rxq->desc, entry->data_size);    in cudbg_fill_qdesc_rxq()
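cudbg_fill_qdesc_rxq() sizes a debug dump as num_desc * desc_size and copies the raw descriptor ring into the entry. A sketch of that capture step with illustrative types; cudbg's real entry struct carries more metadata:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative dump entry with a flexible array for the ring bytes. */
    struct qdesc_entry {
            unsigned int qid;
            unsigned int desc_size;
            unsigned int num_desc;
            unsigned int data_size;
            unsigned char data[];
    };

    static struct qdesc_entry *capture_ring(unsigned int qid, const void *desc,
                                            unsigned int num, unsigned int size)
    {
            unsigned int data_size = num * size;
            struct qdesc_entry *entry = malloc(sizeof(*entry) + data_size);

            if (!entry)
                    return NULL;
            entry->qid = qid;
            entry->desc_size = size;
            entry->num_desc = num;
            entry->data_size = data_size;
            memcpy(entry->data, desc, data_size); /* raw ring snapshot */
            return entry;
    }

    int main(void)
    {
            unsigned char ring[4 * 32] = { 0xab };
            struct qdesc_entry *e = capture_ring(7, ring, 4, 32);

            if (e)
                    printf("dumped qid %u: %u bytes\n", e->qid, e->data_size);
            free(e);
            return 0;
    }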
|
/drivers/net/ethernet/samsung/sxgbe/ |
D | sxgbe_main.c |
   326          priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],    in sxgbe_clear_descriptors()
   586                          priv->rxq[queue_num], rx_rsize);    in init_dma_desc_rings()
   595          priv->rxq[queue_num]->priv_ptr = priv;    in init_dma_desc_rings()
   609          free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);    in init_dma_desc_rings()
   661          free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);    in free_dma_desc_resources()
   684          priv->rxq[queue_num] = devm_kmalloc(priv->device,    in rxring_mem_alloc()
   686          if (!priv->rxq[queue_num])    in rxring_mem_alloc()
   967                  (priv->rxq[queue_num])->dma_rx_phy,    in sxgbe_init_dma_engine()
  1141                  (priv->rxq[queue_num])->irq_no,    in sxgbe_open()
  1143                  dev->name, priv->rxq[queue_num]);    in sxgbe_open()
[all …]
|
/drivers/target/iscsi/cxgbit/ |
D | cxgbit_main.c |
   101          spin_lock_bh(&csk->rxq.lock);    in cxgbit_close_conn()
   102          __skb_queue_tail(&csk->rxq, skb);    in cxgbit_close_conn()
   103          if (skb_queue_len(&csk->rxq) == 1)    in cxgbit_close_conn()
   105          spin_unlock_bh(&csk->rxq.lock);    in cxgbit_close_conn()
   337          spin_lock(&csk->rxq.lock);    in cxgbit_queue_lro_skb()
   338          __skb_queue_tail(&csk->rxq, skb);    in cxgbit_queue_lro_skb()
   339          if (skb_queue_len(&csk->rxq) == 1)    in cxgbit_queue_lro_skb()
   341          spin_unlock(&csk->rxq.lock);    in cxgbit_queue_lro_skb()
   566          spin_lock(&csk->rxq.lock);    in cxgbit_update_dcb_priority()
   567          __skb_queue_tail(&csk->rxq, skb);    in cxgbit_update_dcb_priority()
[all …]
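All three call sites above share one idiom: after appending under the lock, further processing is kicked only when skb_queue_len() == 1, i.e. when the queue just went from empty to non-empty, so one worker drains a whole burst instead of being woken per packet. A sketch of that transition test, with the locking and the worker itself elided:

    #include <stdio.h>

    struct queue { int len; };

    /* Stand-in for the wakeup; only called on empty -> non-empty. */
    static void kick_worker(void) { printf("worker scheduled\n"); }

    static void queue_tail(struct queue *q)
    {
            /* ...append the entry under the queue lock (elided)... */
            q->len++;
            if (q->len == 1)
                    kick_worker(); /* first entry: wake the drain worker */
    }

    int main(void)
    {
            struct queue q = { 0 };

            queue_tail(&q); /* schedules the worker */
            queue_tail(&q); /* already pending: no extra kick */
            queue_tail(&q);
            printf("queued %d entries\n", q.len);
            return 0;
    }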
|