Lines Matching +full:1 +full:q

66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
152 #if SGE_NUM_GENBITS == 1
153 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
158 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
163 # error "SGE_NUM_GENBITS must be 1 or 2"
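The comment at line 146 gives the flit-to-descriptor formula that the flit_desc_map table above encodes. Below is a minimal userspace sketch of just that arithmetic; WR_FLITS is an illustrative placeholder, the real per-descriptor capacity comes from the driver's descriptor layout, and the driver itself uses the table lookup rather than this computation.

/* Sketch of desc = 1 + (flits - 2) / (WR_FLITS - 1) from line 146.
 * WR_FLITS below is an assumed, illustrative value; the driver
 * looks the result up in flit_desc_map instead of computing it. */
#include <stdio.h>

#define WR_FLITS 16u	/* assumption for illustration only */

static unsigned int flits_to_desc_formula(unsigned int flits)
{
	if (flits <= 2)
		return 1;
	return 1 + (flits - 2) / (WR_FLITS - 1);
}

int main(void)
{
	for (unsigned int flits = 2; flits <= 35; flits++)
		printf("flits=%2u -> desc=%u\n", flits,
		       flits_to_desc_formula(flits));
	return 0;
}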
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) in fl_to_qset() argument
169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
174 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
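The three helpers above all use container_of() to step from an embedded queue back to its enclosing sge_qset. A self-contained sketch of that pattern with simplified stand-in structures (the structure members here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for struct sge_rspq / struct sge_qset. */
struct rspq { unsigned int cidx; };

struct qset {
	int id;
	struct rspq rspq;	/* embedded member */
};

/* Same trick the kernel macro uses: subtract the member's offset
 * from the member's address to recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct qset *rspq_to_qset(struct rspq *q)
{
	return container_of(q, struct qset, rspq);
}

int main(void)
{
	struct qset qs = { .id = 3 };

	printf("qset id = %d\n", rspq_to_qset(&qs.rspq)->id);
	return 0;
}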
185 * @q: the response queue to replenish
192 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
208 return 1; in need_skb_unmap()
217 * @q: the Tx queue containing Tx descriptors for the packet
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
240 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
249 j = 1; in unmap_skb()
252 curflit = d->sflit + 1 + j; in unmap_skb()
259 j ^= 1; in unmap_skb()
269 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ in unmap_skb()
279 * @q: the Tx queue to reclaim descriptors from
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, in free_tx_desc() argument
290 unsigned int cidx = q->cidx; in free_tx_desc()
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START; in free_tx_desc()
295 d = &q->sdesc[cidx]; in free_tx_desc()
299 unmap_skb(d->skb, q, cidx, pdev); in free_tx_desc()
306 if (++cidx == q->size) { in free_tx_desc()
308 d = q->sdesc; in free_tx_desc()
311 q->cidx = cidx; in free_tx_desc()
317 * @q: the Tx queue to reclaim completed descriptors from
325 struct sge_txq *q, in reclaim_completed_tx() argument
328 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
332 free_tx_desc(adapter, q, reclaim); in reclaim_completed_tx()
333 q->cleaned += reclaim; in reclaim_completed_tx()
334 q->in_use -= reclaim; in reclaim_completed_tx()
336 return q->processed - q->cleaned; in reclaim_completed_tx()
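reclaim_completed_tx() above relies on processed and cleaned being free-running unsigned counters: their difference is the number of completed-but-unreclaimed descriptors, and the subtraction stays correct across 32-bit wraparound. A tiny sketch of that invariant with invented counter values:

#include <stdio.h>

int main(void)
{
	/* Free-running 32-bit counters straddling the wrap point. */
	unsigned int cleaned   = 0xfffffffeu;
	unsigned int processed = cleaned + 5;	/* wraps to 3 */

	/* Modular subtraction still gives the true distance, 5. */
	printf("reclaimable = %u\n", processed - cleaned);
	return 0;
}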
341 * @q: the Tx queue
345 static inline int should_restart_tx(const struct sge_txq *q) in should_restart_tx() argument
347 unsigned int r = q->processed - q->cleaned; in should_restart_tx()
349 return q->in_use - r < (q->size >> 1); in should_restart_tx()
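should_restart_tx() above compares what would remain in use after reclaiming completed work against half the ring: the queue is only worth restarting once that residue drops below q->size / 2. A sketch of the predicate with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

static bool should_restart_tx(unsigned int in_use, unsigned int processed,
			      unsigned int cleaned, unsigned int size)
{
	unsigned int reclaimable = processed - cleaned;

	/* Restart only if the ring would be less than half full
	 * after reclaiming everything already completed. */
	return in_use - reclaimable < (size >> 1);
}

int main(void)
{
	printf("%d\n", should_restart_tx(900, 600, 100, 1024)); /* 1 */
	printf("%d\n", should_restart_tx(900, 150, 100, 1024)); /* 0 */
	return 0;
}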
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, in clear_rx_desc() argument
355 if (q->use_pages && d->pg_chunk.page) { in clear_rx_desc()
360 q->alloc_size, PCI_DMA_FROMDEVICE); in clear_rx_desc()
366 q->buf_size, PCI_DMA_FROMDEVICE); in clear_rx_desc()
375 * @q: the SGE free list to clean up
380 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) in free_rx_bufs() argument
382 unsigned int cidx = q->cidx; in free_rx_bufs()
384 while (q->credits--) { in free_rx_bufs()
385 struct rx_sw_desc *d = &q->sdesc[cidx]; in free_rx_bufs()
388 clear_rx_desc(pdev, q, d); in free_rx_bufs()
389 if (++cidx == q->size) in free_rx_bufs()
393 if (q->pg_chunk.page) { in free_rx_bufs()
394 __free_pages(q->pg_chunk.page, q->order); in free_rx_bufs()
395 q->pg_chunk.page = NULL; in free_rx_bufs()
442 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, in alloc_pg_chunk() argument
446 if (!q->pg_chunk.page) { in alloc_pg_chunk()
449 q->pg_chunk.page = alloc_pages(gfp, order); in alloc_pg_chunk()
450 if (unlikely(!q->pg_chunk.page)) in alloc_pg_chunk()
452 q->pg_chunk.va = page_address(q->pg_chunk.page); in alloc_pg_chunk()
453 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - in alloc_pg_chunk()
455 q->pg_chunk.offset = 0; in alloc_pg_chunk()
456 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, in alloc_pg_chunk()
457 0, q->alloc_size, PCI_DMA_FROMDEVICE); in alloc_pg_chunk()
459 __free_pages(q->pg_chunk.page, order); in alloc_pg_chunk()
460 q->pg_chunk.page = NULL; in alloc_pg_chunk()
463 q->pg_chunk.mapping = mapping; in alloc_pg_chunk()
465 sd->pg_chunk = q->pg_chunk; in alloc_pg_chunk()
469 q->pg_chunk.offset += q->buf_size; in alloc_pg_chunk()
470 if (q->pg_chunk.offset == (PAGE_SIZE << order)) in alloc_pg_chunk()
471 q->pg_chunk.page = NULL; in alloc_pg_chunk()
473 q->pg_chunk.va += q->buf_size; in alloc_pg_chunk()
474 get_page(q->pg_chunk.page); in alloc_pg_chunk()
478 *sd->pg_chunk.p_cnt = 1; in alloc_pg_chunk()
480 *sd->pg_chunk.p_cnt += 1; in alloc_pg_chunk()
485 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
487 if (q->pend_cred >= q->credits / 4) { in ring_fl_db()
488 q->pend_cred = 0; in ring_fl_db()
490 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
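ring_fl_db() above batches doorbell writes: the hardware is only told about newly posted free-list buffers once the pending credits reach a quarter of the queue's credits, which keeps MMIO traffic down. A sketch of that gate with a stubbed register write (write_doorbell() is a stand-in, not a driver function):

#include <stdio.h>

struct fl {
	unsigned int credits;	/* buffers currently posted */
	unsigned int pend_cred;	/* posted but not yet announced */
	unsigned int cntxt_id;
};

static void write_doorbell(unsigned int cntxt_id)
{
	printf("doorbell: egress context %u\n", cntxt_id);
}

static void ring_fl_db(struct fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		write_doorbell(q->cntxt_id);
	}
}

int main(void)
{
	struct fl q = { .credits = 1024, .pend_cred = 300, .cntxt_id = 5 };

	ring_fl_db(&q);	/* 300 >= 256: doorbell rings, pend_cred reset */
	return 0;
}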
497 * @q: the free-list to refill
505 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) in refill_fl() argument
507 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
508 struct rx_desc *d = &q->desc[q->pidx]; in refill_fl()
515 if (q->use_pages) { in refill_fl()
516 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, in refill_fl()
517 q->order))) { in refill_fl()
518 nomem: q->alloc_failed++; in refill_fl()
524 add_one_rx_chunk(mapping, d, q->gen); in refill_fl()
526 q->buf_size - SGE_PG_RSVD, in refill_fl()
531 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); in refill_fl()
537 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, in refill_fl()
538 q->gen, adap->pdev); in refill_fl()
540 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
547 if (++q->pidx == q->size) { in refill_fl()
548 q->pidx = 0; in refill_fl()
549 q->gen ^= 1; in refill_fl()
550 sd = q->sdesc; in refill_fl()
551 d = q->desc; in refill_fl()
556 q->credits += count; in refill_fl()
557 q->pend_cred += count; in refill_fl()
558 ring_fl_db(adap, q); in refill_fl()
572 * @q: the SGE free list
578 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, in recycle_rx_buf() argument
581 struct rx_desc *from = &q->desc[idx]; in recycle_rx_buf()
582 struct rx_desc *to = &q->desc[q->pidx]; in recycle_rx_buf()
584 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
588 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); in recycle_rx_buf()
589 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); in recycle_rx_buf()
591 if (++q->pidx == q->size) { in recycle_rx_buf()
592 q->pidx = 0; in recycle_rx_buf()
593 q->gen ^= 1; in recycle_rx_buf()
596 q->credits++; in recycle_rx_buf()
597 q->pend_cred++; in recycle_rx_buf()
598 ring_fl_db(adap, q); in recycle_rx_buf()
641 * @q: the queue set
647 static void t3_reset_qset(struct sge_qset *q) in t3_reset_qset() argument
649 if (q->adap && in t3_reset_qset()
650 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
651 memset(q, 0, sizeof(*q)); in t3_reset_qset()
655 q->adap = NULL; in t3_reset_qset()
656 memset(&q->rspq, 0, sizeof(q->rspq)); in t3_reset_qset()
657 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
658 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
659 q->txq_stopped = 0; in t3_reset_qset()
660 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ in t3_reset_qset()
661 q->rx_reclaim_timer.function = NULL; in t3_reset_qset()
662 q->nomem = 0; in t3_reset_qset()
663 napi_free_frags(&q->napi); in t3_reset_qset()
670 * @q: the queue set
676 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) in t3_free_qset() argument
682 if (q->fl[i].desc) { in t3_free_qset()
684 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
686 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
687 kfree(q->fl[i].sdesc); in t3_free_qset()
689 q->fl[i].size * in t3_free_qset()
690 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
691 q->fl[i].phys_addr); in t3_free_qset()
695 if (q->txq[i].desc) { in t3_free_qset()
697 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
699 if (q->txq[i].sdesc) { in t3_free_qset()
700 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
701 q->txq[i].in_use); in t3_free_qset()
702 kfree(q->txq[i].sdesc); in t3_free_qset()
705 q->txq[i].size * in t3_free_qset()
707 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
708 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
711 if (q->rspq.desc) { in t3_free_qset()
713 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); in t3_free_qset()
716 q->rspq.size * sizeof(struct rsp_desc), in t3_free_qset()
717 q->rspq.desc, q->rspq.phys_addr); in t3_free_qset()
720 t3_reset_qset(q); in t3_free_qset()
734 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
751 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ in sgl_len()
752 return (3 * n) / 2 + (n & 1); in sgl_len()
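sgl_len() above counts the 64-bit flits needed for an SGL of n addresses: each full entry packs two address/length pairs into three flits, and an odd trailing address costs two more. The comment's alternative form is arithmetically identical, which the sketch below checks:

#include <assert.h>
#include <stdio.h>

static unsigned int sgl_len_a(unsigned int n)
{
	return (3 * n) / 2 + (n & 1);
}

static unsigned int sgl_len_b(unsigned int n)
{
	return 3 * (n / 2) + 2 * (n & 1);
}

int main(void)
{
	for (unsigned int n = 1; n <= 18; n++) {
		assert(sgl_len_a(n) == sgl_len_b(n));
		printf("n=%2u -> %u flits\n", n, sgl_len_a(n));
	}
	return 0;
}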
811 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), in get_packet()
828 * @q: the queue
844 struct sge_rspq *q, unsigned int len, in get_packet_pg() argument
852 newskb = skb = q->pg_skb; in get_packet_pg()
868 q->rx_recycle_buf++; in get_packet_pg()
872 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) in get_packet_pg()
949 return 1; in calc_tx_descs()
951 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; in calc_tx_descs()
995 pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); in map_skb()
1029 j ^= 1; in write_sgl()
1041 * @q: the Tx queue
1050 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) in check_ring_tx_db() argument
1053 clear_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1054 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { in check_ring_tx_db()
1055 set_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1057 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1062 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
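check_ring_tx_db() above gates the doorbell on an atomic test-and-set of TXQ_RUNNING, so only the caller that finds the fetch engine idle issues the kick and records that the last packet rang the doorbell. The sketch below mirrors only that control flow; plain flag bits replace the driver's atomic bitops and it is not SMP-safe as written:

#include <stdio.h>

#define TXQ_RUNNING      (1 << 0)
#define TXQ_LAST_PKT_DB  (1 << 1)

static void kick_tx(unsigned int cntxt_id)
{
	printf("kick egress context %u\n", cntxt_id);
}

static void check_ring_tx_db(unsigned long *flags, unsigned int cntxt_id)
{
	*flags &= ~TXQ_LAST_PKT_DB;
	if (!(*flags & TXQ_RUNNING)) {	/* engine idle: we start it */
		*flags |= TXQ_RUNNING | TXQ_LAST_PKT_DB;
		kick_tx(cntxt_id);
	}
}

int main(void)
{
	unsigned long flags = 0;

	check_ring_tx_db(&flags, 3);	/* rings the doorbell */
	check_ring_tx_db(&flags, 3);	/* engine marked running: no kick */
	return 0;
}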
1069 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); in wr_gen2()
1079 * @q: the SGE Tx queue
1094 const struct sge_txq *q, in write_wr_hdr_sgl() argument
1101 struct tx_sw_desc *sd = &q->sdesc[pidx]; in write_wr_hdr_sgl()
1110 if (likely(ndesc == 1)) { in write_wr_hdr_sgl()
1111 sd->eop = 1; in write_wr_hdr_sgl()
1112 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1123 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1141 if (++pidx == q->size) { in write_wr_hdr_sgl()
1143 gen ^= 1; in write_wr_hdr_sgl()
1144 d = q->desc; in write_wr_hdr_sgl()
1145 sd = q->sdesc; in write_wr_hdr_sgl()
1150 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1151 V_WR_SGLSFLT(1)) | wr_hi; in write_wr_hdr_sgl()
1153 sgl_flits + 1)) | in write_wr_hdr_sgl()
1156 flits = 1; in write_wr_hdr_sgl()
1158 sd->eop = 1; in write_wr_hdr_sgl()
1174 * @q: the Tx queue
1184 struct sge_txq *q, unsigned int ndesc, in write_tx_pkt_wr() argument
1188 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_tx_pkt_wr()
1189 struct tx_desc *d = &q->desc[pidx]; in write_tx_pkt_wr()
1220 q->sdesc[pidx].skb = NULL; in write_tx_pkt_wr()
1233 V_WR_TID(q->token)); in write_tx_pkt_wr()
1242 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_tx_pkt_wr()
1245 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, in write_tx_pkt_wr()
1247 htonl(V_WR_TID(q->token))); in write_tx_pkt_wr()
1251 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
1255 q->stops++; in t3_stop_tx_queue()
1273 struct sge_txq *q; in t3_eth_xmit() local
1274 dma_addr_t addr[MAX_SKB_FRAGS + 1]; in t3_eth_xmit()
1287 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1290 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in t3_eth_xmit()
1292 credits = q->size - q->in_use; in t3_eth_xmit()
1296 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1299 dev->name, q->cntxt_id & 7); in t3_eth_xmit()
1311 q->in_use += ndesc; in t3_eth_xmit()
1312 if (unlikely(credits - ndesc < q->stop_thres)) { in t3_eth_xmit()
1313 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1315 if (should_restart_tx(q) && in t3_eth_xmit()
1317 q->restarts++; in t3_eth_xmit()
1322 gen = q->gen; in t3_eth_xmit()
1323 q->unacked += ndesc; in t3_eth_xmit()
1324 compl = (q->unacked & 8) << (S_WR_COMPL - 3); in t3_eth_xmit()
1325 q->unacked &= 7; in t3_eth_xmit()
1326 pidx = q->pidx; in t3_eth_xmit()
1327 q->pidx += ndesc; in t3_eth_xmit()
1328 if (q->pidx >= q->size) { in t3_eth_xmit()
1329 q->pidx -= q->size; in t3_eth_xmit()
1330 q->gen ^= 1; in t3_eth_xmit()
1368 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); in t3_eth_xmit()
1369 check_ring_tx_db(adap, q); in t3_eth_xmit()
1392 memcpy(&to[1], &from[1], len - sizeof(*from)); in write_imm()
1394 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); in write_imm()
1408 * @q: the send queue
1418 * Returns 0 if enough descriptors are available, 1 if there aren't
1423 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, in check_desc_avail() argument
1427 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1428 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1429 return 1; in check_desc_avail()
1431 if (unlikely(q->size - q->in_use < ndesc)) { in check_desc_avail()
1432 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail()
1437 if (should_restart_tx(q) && in check_desc_avail()
1441 q->stops++; in check_desc_avail()
1449 * @q: the SGE control Tx queue
1455 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1457 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx_imm()
1459 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1460 q->cleaned += reclaim; in reclaim_completed_tx_imm()
1471 * @q: the control queue
1478 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, in ctrl_xmit() argument
1485 WARN_ON(1); in ctrl_xmit()
1491 wrp->wr_lo = htonl(V_WR_TID(q->token)); in ctrl_xmit()
1493 spin_lock(&q->lock); in ctrl_xmit()
1494 again:reclaim_completed_tx_imm(q); in ctrl_xmit()
1496 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); in ctrl_xmit()
1498 if (ret == 1) { in ctrl_xmit()
1499 spin_unlock(&q->lock); in ctrl_xmit()
1505 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1507 q->in_use++; in ctrl_xmit()
1508 if (++q->pidx >= q->size) { in ctrl_xmit()
1509 q->pidx = 0; in ctrl_xmit()
1510 q->gen ^= 1; in ctrl_xmit()
1512 spin_unlock(&q->lock); in ctrl_xmit()
1515 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in ctrl_xmit()
1529 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq() local
1531 spin_lock(&q->lock); in restart_ctrlq()
1532 again:reclaim_completed_tx_imm(q); in restart_ctrlq()
1534 while (q->in_use < q->size && in restart_ctrlq()
1535 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1537 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1539 if (++q->pidx >= q->size) { in restart_ctrlq()
1540 q->pidx = 0; in restart_ctrlq()
1541 q->gen ^= 1; in restart_ctrlq()
1543 q->in_use++; in restart_ctrlq()
1546 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1550 if (should_restart_tx(q) && in restart_ctrlq()
1553 q->stops++; in restart_ctrlq()
1556 spin_unlock(&q->lock); in restart_ctrlq()
1559 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_ctrlq()
1613 *p++ = be64_to_cpu(sgl->addr[1]); in setup_deferred_unmapping()
1623 * @q: the Tx queue
1633 struct sge_txq *q, unsigned int pidx, in write_ofld_wr() argument
1639 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_ofld_wr()
1640 struct tx_desc *d = &q->desc[pidx]; in write_ofld_wr()
1643 q->sdesc[pidx].skb = NULL; in write_ofld_wr()
1651 memcpy(&d->flit[1], &from[1], in write_ofld_wr()
1655 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_ofld_wr()
1664 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, in write_ofld_wr()
1680 return 1; /* packet fits as immediate data */ in calc_tx_descs_ofld()
1692 * @q: the Tx offload queue
1697 static int ofld_xmit(struct adapter *adap, struct sge_txq *q, in ofld_xmit() argument
1703 spin_lock(&q->lock); in ofld_xmit()
1704 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in ofld_xmit()
1706 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); in ofld_xmit()
1708 if (ret == 1) { in ofld_xmit()
1710 spin_unlock(&q->lock); in ofld_xmit()
1718 spin_unlock(&q->lock); in ofld_xmit()
1722 gen = q->gen; in ofld_xmit()
1723 q->in_use += ndesc; in ofld_xmit()
1724 pidx = q->pidx; in ofld_xmit()
1725 q->pidx += ndesc; in ofld_xmit()
1726 if (q->pidx >= q->size) { in ofld_xmit()
1727 q->pidx -= q->size; in ofld_xmit()
1728 q->gen ^= 1; in ofld_xmit()
1730 spin_unlock(&q->lock); in ofld_xmit()
1732 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); in ofld_xmit()
1733 check_ring_tx_db(adap, q); in ofld_xmit()
1747 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq() local
1752 spin_lock(&q->lock); in restart_offloadq()
1753 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in restart_offloadq()
1755 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1759 if (unlikely(q->size - q->in_use < ndesc)) { in restart_offloadq()
1763 if (should_restart_tx(q) && in restart_offloadq()
1766 q->stops++; in restart_offloadq()
1774 gen = q->gen; in restart_offloadq()
1775 q->in_use += ndesc; in restart_offloadq()
1776 pidx = q->pidx; in restart_offloadq()
1777 q->pidx += ndesc; in restart_offloadq()
1779 if (q->pidx >= q->size) { in restart_offloadq()
1780 q->pidx -= q->size; in restart_offloadq()
1781 q->gen ^= 1; in restart_offloadq()
1783 __skb_unlink(skb, &q->sendq); in restart_offloadq()
1784 spin_unlock(&q->lock); in restart_offloadq()
1786 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, in restart_offloadq()
1788 spin_lock(&q->lock); in restart_offloadq()
1790 spin_unlock(&q->lock); in restart_offloadq()
1793 set_bit(TXQ_RUNNING, &q->flags); in restart_offloadq()
1794 set_bit(TXQ_LAST_PKT_DB, &q->flags); in restart_offloadq()
1799 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_offloadq()
1807 * set is carried in bits 1-3 in the packet's priority.
1811 return skb->priority >> 1; in queue_set()
1823 return skb->priority & 1; in is_ctrl_pkt()
1833 * should be sent as regular or control, bits 1-3 select the queue set.
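The comments at lines 1807 and 1833 describe how offload packets encode their destination in skb->priority: bit 0 distinguishes control from regular traffic and bits 1-3 select the queue set, which is what queue_set() and is_ctrl_pkt() decode. A short sketch of that encoding:

#include <stdio.h>

static unsigned int queue_set(unsigned int priority)
{
	return priority >> 1;	/* bits 1-3: queue set index */
}

static int is_ctrl_pkt(unsigned int priority)
{
	return priority & 1;	/* bit 0: control vs. regular */
}

int main(void)
{
	unsigned int prio = (2 << 1) | 1;	/* queue set 2, control */

	printf("qset=%u ctrl=%d\n", queue_set(prio), is_ctrl_pkt(prio));
	return 0;
}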
1848 * @q: the SGE response queue
1855 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) in offload_enqueue() argument
1857 int was_empty = skb_queue_empty(&q->rx_queue); in offload_enqueue()
1859 __skb_queue_tail(&q->rx_queue, skb); in offload_enqueue()
1862 struct sge_qset *qs = rspq_to_qset(q); in offload_enqueue()
1871 * @q: the SGE response queue that assembled the bundle
1878 struct sge_rspq *q, in deliver_partial_bundle() argument
1882 q->offload_bundles++; in deliver_partial_bundle()
1901 struct sge_rspq *q = &qs->rspq; in ofld_poll() local
1910 spin_lock_irq(&q->lock); in ofld_poll()
1912 skb_queue_splice_init(&q->rx_queue, &queue); in ofld_poll()
1915 spin_unlock_irq(&q->lock); in ofld_poll()
1918 spin_unlock_irq(&q->lock); in ofld_poll()
1930 q->offload_bundles++; in ofld_poll()
1938 spin_lock_irq(&q->lock); in ofld_poll()
1939 skb_queue_splice(&queue, &q->rx_queue); in ofld_poll()
1940 spin_unlock_irq(&q->lock); in ofld_poll()
1942 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
2036 arp_ptr = (unsigned char *)(arp + 1); in cxgb3_arp_process()
2283 * @q: the response queue
2289 const struct sge_rspq *q) in is_new_response() argument
2291 return (r->intr_gen & F_RSPD_GEN2) == q->gen; in is_new_response()
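is_new_response() above is the consumer side of the ring's generation scheme: a response is valid only while its generation bit matches the queue's, and the queue flips its bit each time cidx wraps (lines 2414-2417), so descriptors left over from the previous lap stop matching. A simplified sketch of both halves (the descriptor's generation field is reduced to a plain int here):

#include <stdbool.h>
#include <stdio.h>

struct rsp { unsigned int gen; };

struct rspq {
	unsigned int cidx, size, gen;
};

static bool is_new_response(const struct rsp *r, const struct rspq *q)
{
	return r->gen == q->gen;
}

static void advance_consumer(struct rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;	/* wrap: old-lap descriptors go stale */
	}
}

int main(void)
{
	struct rspq q = { .cidx = 0, .size = 2, .gen = 1 };
	struct rsp r = { .gen = 1 };	/* written during this lap */

	advance_consumer(&q);
	advance_consumer(&q);		/* wrap: q.gen becomes 0 */
	printf("still new? %d\n", is_new_response(&r, &q));	/* 0 */
	return 0;
}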
2294 static inline void clear_rspq_bufstate(struct sge_rspq * const q) in clear_rspq_bufstate() argument
2296 q->pg_skb = NULL; in clear_rspq_bufstate()
2297 q->rx_recycle_buf = 0; in clear_rspq_bufstate()
2327 struct sge_rspq *q = &qs->rspq; in process_responses() local
2328 struct rsp_desc *r = &q->desc[q->cidx]; in process_responses()
2334 q->next_holdoff = q->holdoff_tmr; in process_responses()
2336 while (likely(budget_left && is_new_response(r, q))) { in process_responses()
2357 q->async_notif++; in process_responses()
2362 q->next_holdoff = NOMEM_INTR_DELAY; in process_responses()
2363 q->nomem++; in process_responses()
2368 q->imm_data++; in process_responses()
2375 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; in process_responses()
2388 skb = get_packet_pg(adap, fl, q, in process_responses()
2392 q->pg_skb = skb; in process_responses()
2399 q->rx_drops++; in process_responses()
2406 q->pure_rsps++; in process_responses()
2414 if (unlikely(++q->cidx == q->size)) { in process_responses()
2415 q->cidx = 0; in process_responses()
2416 q->gen ^= 1; in process_responses()
2417 r = q->desc; in process_responses()
2421 if (++q->credits >= (q->size / 4)) { in process_responses()
2422 refill_rspq(adap, q, q->credits); in process_responses()
2423 q->credits = 0; in process_responses()
2432 rx_eth(adap, q, skb, ethpad, lro); in process_responses()
2434 q->offload_pkts++; in process_responses()
2438 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2444 clear_rspq_bufstate(q); in process_responses()
2449 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2526 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2531 struct sge_rspq *q = &qs->rspq; in process_pure_responses() local
2538 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
2539 q->cidx = 0; in process_pure_responses()
2540 q->gen ^= 1; in process_pure_responses()
2541 r = q->desc; in process_pure_responses()
2550 q->pure_rsps++; in process_pure_responses()
2551 if (++q->credits >= (q->size / 4)) { in process_pure_responses()
2552 refill_rspq(adap, q, q->credits); in process_pure_responses()
2553 q->credits = 0; in process_pure_responses()
2555 if (!is_new_response(r, q)) in process_pure_responses()
2567 return is_new_response(r, q); in process_pure_responses()
2573 * @q: the response queue
2576 * new SGE responses. If there are no new responses it returns -1. If
2580 * signaling responses it schedules the NAPI handler. Returns 1 if it
2585 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) in handle_responses() argument
2587 struct sge_qset *qs = rspq_to_qset(q); in handle_responses()
2588 struct rsp_desc *r = &q->desc[q->cidx]; in handle_responses()
2590 if (!is_new_response(r, q)) in handle_responses()
2591 return -1; in handle_responses()
2594 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2595 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); in handle_responses()
2599 return 1; in handle_responses()
2610 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix() local
2612 spin_lock(&q->lock); in t3_sge_intr_msix()
2613 if (process_responses(adap, qs, -1) == 0) in t3_sge_intr_msix()
2614 q->unhandled_irqs++; in t3_sge_intr_msix()
2615 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2616 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_sge_intr_msix()
2617 spin_unlock(&q->lock); in t3_sge_intr_msix()
2628 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi() local
2630 spin_lock(&q->lock); in t3_sge_intr_msix_napi()
2632 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2633 q->unhandled_irqs++; in t3_sge_intr_msix_napi()
2634 spin_unlock(&q->lock); in t3_sge_intr_msix_napi()
2648 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi() local
2650 spin_lock(&q->lock); in t3_intr_msi()
2652 if (process_responses(adap, &adap->sge.qs[0], -1)) { in t3_intr_msi()
2653 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2654 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_intr_msi()
2655 new_packets = 1; in t3_intr_msi()
2659 process_responses(adap, &adap->sge.qs[1], -1)) { in t3_intr_msi()
2660 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr_msi()
2665 new_packets = 1; in t3_intr_msi()
2669 q->unhandled_irqs++; in t3_intr_msi()
2671 spin_unlock(&q->lock); in t3_intr_msi()
2677 struct sge_rspq *q = &qs->rspq; in rspq_check_napi() local
2680 is_new_response(&q->desc[q->cidx], q)) { in rspq_check_napi()
2682 return 1; in rspq_check_napi()
2698 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi() local
2700 spin_lock(&q->lock); in t3_intr_msi_napi()
2704 new_packets += rspq_check_napi(&adap->sge.qs[1]); in t3_intr_msi_napi()
2706 q->unhandled_irqs++; in t3_intr_msi_napi()
2708 spin_unlock(&q->lock); in t3_intr_msi_napi()
2720 work = process_responses(adap, rspq_to_qset(rq), -1); in process_responses_gts()
2737 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr()
2787 if (likely(map & 1)) in t3b_intr()
2791 process_responses_gts(adap, &adap->sge.qs[1].rspq); in t3b_intr()
2822 if (likely(map & 1)) in t3b_intr_napi()
2826 napi_schedule(&adap->sge.qs[1].napi); in t3b_intr_napi()
2982 if (status & (1 << qs->rspq.cntxt_id)) { in sge_timer_rx()
2986 refill_rspq(adap, &qs->rspq, 1); in sge_timer_rx()
2989 1 << qs->rspq.cntxt_id); in sge_timer_rx()
2996 if (qs->fl[1].credits < qs->fl[1].size) in sge_timer_rx()
2997 __refill_fl(adap, &qs->fl[1]); in sge_timer_rx()
3015 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ in t3_update_qset_coalesce()
3042 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset() local
3044 init_qset_cntxt(q, id); in t3_sge_alloc_qset()
3045 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); in t3_sge_alloc_qset()
3046 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); in t3_sge_alloc_qset()
3048 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, in t3_sge_alloc_qset()
3051 &q->fl[0].phys_addr, &q->fl[0].sdesc); in t3_sge_alloc_qset()
3052 if (!q->fl[0].desc) in t3_sge_alloc_qset()
3055 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, in t3_sge_alloc_qset()
3058 &q->fl[1].phys_addr, &q->fl[1].sdesc); in t3_sge_alloc_qset()
3059 if (!q->fl[1].desc) in t3_sge_alloc_qset()
3062 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, in t3_sge_alloc_qset()
3064 &q->rspq.phys_addr, NULL); in t3_sge_alloc_qset()
3065 if (!q->rspq.desc) in t3_sge_alloc_qset()
3075 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], in t3_sge_alloc_qset()
3077 &q->txq[i].phys_addr, in t3_sge_alloc_qset()
3078 &q->txq[i].sdesc); in t3_sge_alloc_qset()
3079 if (!q->txq[i].desc) in t3_sge_alloc_qset()
3082 q->txq[i].gen = 1; in t3_sge_alloc_qset()
3083 q->txq[i].size = p->txq_size[i]; in t3_sge_alloc_qset()
3084 spin_lock_init(&q->txq[i].lock); in t3_sge_alloc_qset()
3085 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
3088 tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq); in t3_sge_alloc_qset()
3089 tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq); in t3_sge_alloc_qset()
3091 q->fl[0].gen = q->fl[1].gen = 1; in t3_sge_alloc_qset()
3092 q->fl[0].size = p->fl_size; in t3_sge_alloc_qset()
3093 q->fl[1].size = p->jumbo_size; in t3_sge_alloc_qset()
3095 q->rspq.gen = 1; in t3_sge_alloc_qset()
3096 q->rspq.size = p->rspq_size; in t3_sge_alloc_qset()
3097 spin_lock_init(&q->rspq.lock); in t3_sge_alloc_qset()
3098 skb_queue_head_init(&q->rspq.rx_queue); in t3_sge_alloc_qset()
3100 q->txq[TXQ_ETH].stop_thres = nports * in t3_sge_alloc_qset()
3101 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); in t3_sge_alloc_qset()
3104 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3106 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); in t3_sge_alloc_qset()
3109 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3111 q->fl[1].buf_size = is_offload(adapter) ? in t3_sge_alloc_qset()
3116 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3117 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3118 q->fl[0].order = FL0_PG_ORDER; in t3_sge_alloc_qset()
3119 q->fl[1].order = FL1_PG_ORDER; in t3_sge_alloc_qset()
3120 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3121 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3126 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, in t3_sge_alloc_qset()
3127 q->rspq.phys_addr, q->rspq.size, in t3_sge_alloc_qset()
3128 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); in t3_sge_alloc_qset()
3133 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, in t3_sge_alloc_qset()
3134 q->fl[i].phys_addr, q->fl[i].size, in t3_sge_alloc_qset()
3135 q->fl[i].buf_size - SGE_PG_RSVD, in t3_sge_alloc_qset()
3136 p->cong_thres, 1, 0); in t3_sge_alloc_qset()
3141 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, in t3_sge_alloc_qset()
3142 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, in t3_sge_alloc_qset()
3143 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, in t3_sge_alloc_qset()
3144 1, 0); in t3_sge_alloc_qset()
3148 if (ntxq > 1) { in t3_sge_alloc_qset()
3149 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, in t3_sge_alloc_qset()
3151 q->txq[TXQ_OFLD].phys_addr, in t3_sge_alloc_qset()
3152 q->txq[TXQ_OFLD].size, 0, 1, 0); in t3_sge_alloc_qset()
3158 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, in t3_sge_alloc_qset()
3160 q->txq[TXQ_CTRL].phys_addr, in t3_sge_alloc_qset()
3161 q->txq[TXQ_CTRL].size, in t3_sge_alloc_qset()
3162 q->txq[TXQ_CTRL].token, 1, 0); in t3_sge_alloc_qset()
3169 q->adap = adapter; in t3_sge_alloc_qset()
3170 q->netdev = dev; in t3_sge_alloc_qset()
3171 q->tx_q = netdevq; in t3_sge_alloc_qset()
3172 t3_update_qset_coalesce(q, p); in t3_sge_alloc_qset()
3174 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, in t3_sge_alloc_qset()
3181 if (avail < q->fl[0].size) in t3_sge_alloc_qset()
3185 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, in t3_sge_alloc_qset()
3187 if (avail < q->fl[1].size) in t3_sge_alloc_qset()
3188 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", in t3_sge_alloc_qset()
3190 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); in t3_sge_alloc_qset()
3192 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | in t3_sge_alloc_qset()
3193 V_NEWTIMER(q->rspq.holdoff_tmr)); in t3_sge_alloc_qset()
3200 t3_free_qset(adapter, q); in t3_sge_alloc_qset()
3215 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers() local
3217 if (q->tx_reclaim_timer.function) in t3_start_sge_timers()
3218 mod_timer(&q->tx_reclaim_timer, in t3_start_sge_timers()
3221 if (q->rx_reclaim_timer.function) in t3_start_sge_timers()
3222 mod_timer(&q->rx_reclaim_timer, in t3_start_sge_timers()
3238 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers() local
3240 if (q->tx_reclaim_timer.function) in t3_stop_sge_timers()
3241 del_timer_sync(&q->tx_reclaim_timer); in t3_stop_sge_timers()
3242 if (q->rx_reclaim_timer.function) in t3_stop_sge_timers()
3243 del_timer_sync(&q->rx_reclaim_timer); in t3_stop_sge_timers()
3328 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; in t3_sge_init()
3329 #if SGE_NUM_GENBITS == 1 in t3_sge_init()
3368 struct qset_params *q = p->qset + i; in t3_sge_prep() local
3370 q->polling = adap->params.rev > 0; in t3_sge_prep()
3371 q->coalesce_usecs = 5; in t3_sge_prep()
3372 q->rspq_size = 1024; in t3_sge_prep()
3373 q->fl_size = 1024; in t3_sge_prep()
3374 q->jumbo_size = 512; in t3_sge_prep()
3375 q->txq_size[TXQ_ETH] = 1024; in t3_sge_prep()
3376 q->txq_size[TXQ_OFLD] = 1024; in t3_sge_prep()
3377 q->txq_size[TXQ_CTRL] = 256; in t3_sge_prep()
3378 q->cong_thres = 0; in t3_sge_prep()