/drivers/net/ethernet/chelsio/cxgb/
sge.c:
    256   struct sge {    [struct]
    284   static void tx_sched_stop(struct sge *sge)    in tx_sched_stop() [argument]
    286   struct sched *s = sge->tx_sched;    in tx_sched_stop()
    299   unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,    in t1_sched_update_parms() [argument]
    302   struct sched *s = sge->tx_sched;    in t1_sched_update_parms()
    322   if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {    in t1_sched_update_parms()
    345   void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
    347   struct sched *s = sge->tx_sched;
    352   t1_sched_update_parms(sge, i, 0, 0);
    359   void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
    [all …]
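The t1_sched_* matches above belong to the cxgb driver's software TX scheduler, which meters each port's transmit rate from its negotiated speed and MTU. The matches do not show the actual arithmetic, so the following is only a generic token-bucket sketch in the same spirit; every name and field here is invented for illustration:

    #include <stdint.h>

    /* Generic token-bucket model, NOT the driver's actual arithmetic:
     * each port accumulates transmit credit at a rate derived from its
     * negotiated link speed, capped at max_avail. */
    struct sched_port {
        uint64_t avail;                 /* bytes of credit on hand */
        uint64_t max_avail;             /* cap on accumulated credit */
        uint64_t drain_bytes_per_us;    /* refill rate */
    };

    /* Refill credit for elapsed_us microseconds, then decide whether
     * a packet of len bytes may be sent now. */
    static int sched_may_send(struct sched_port *p, uint64_t elapsed_us,
                              uint64_t len)
    {
        p->avail += p->drain_bytes_per_us * elapsed_us;
        if (p->avail > p->max_avail)
            p->avail = p->max_avail;
        if (p->avail < len)
            return 0;       /* throttle: not enough credit yet */
        p->avail -= len;
        return 1;
    }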
sge.h:
    72    struct sge;
    74    struct sge *t1_sge_create(struct adapter *, struct sge_params *);
    75    int t1_sge_configure(struct sge *, struct sge_params *);
    76    int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
    77    void t1_sge_destroy(struct sge *);
    83    void t1_sge_start(struct sge *);
    84    void t1_sge_stop(struct sge *);
    85    int t1_sge_intr_error_handler(struct sge *);
    86    void t1_sge_intr_enable(struct sge *);
    87    void t1_sge_intr_disable(struct sge *);
    [all …]
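sge.h illustrates the opaque-handle idiom: `struct sge` is only forward-declared, so callers can hold a `struct sge *` and go through the t1_sge_*() entry points while the struct layout stays private to sge.c. A minimal sketch of the same idiom, with illustrative names rather than the driver's:

    /* Public view: the type is declared but never defined here. */
    struct sge;
    struct sge *sge_create(int nqueues);
    void sge_destroy(struct sge *s);

    /* Private view (normally a separate .c file): only this side
     * knows the layout, so it can change without touching callers. */
    #include <stdlib.h>

    struct sge {
        int nqueues;
    };

    struct sge *sge_create(int nqueues)
    {
        struct sge *s = malloc(sizeof(*s));

        if (s)
            s->nqueues = nqueues;
        return s;
    }

    void sge_destroy(struct sge *s)
    {
        free(s);
    }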
cxgb2.c:
    176   t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);    in t1_link_negotiated()
    230   t1_sge_start(adapter->sge);    in cxgb_up()
    241   t1_sge_stop(adapter->sge);    in cxgb_down()
    469   t = t1_sge_get_intr_counts(adapter->sge);    in get_stats()
    470   t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);    in get_stats()
    718   e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];    in get_sge_param()
    719   e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];    in get_sge_param()
    720   e->tx_pending = adapter->params.sge.cmdQ_size[0];    in get_sge_param()
    739   adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;    in set_sge_param()
    740   adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;    in set_sge_param()
    [all …]
subr.c:
    220   t1_sge_intr_error_handler(adapter->sge);    in fpga_slow_intr()
    762   t1_sge_intr_enable(adapter->sge);    in t1_interrupts_enable()
    794   t1_sge_intr_disable(adapter->sge);    in t1_interrupts_disable()
    820   t1_sge_intr_clear(adapter->sge);    in t1_interrupts_clear()
    854   t1_sge_intr_error_handler(adapter->sge);    in asic_slow_intr()
    983   err = t1_sge_configure(adapter->sge, &adapter->params.sge);    in t1_init_hw_modules()
    1023  if (adapter->sge)    in t1_free_sw_modules()
    1024  t1_sge_destroy(adapter->sge);    in t1_free_sw_modules()
    1060  adapter->sge = t1_sge_create(adapter, &adapter->params.sge);    in t1_init_sw_modules()
    1061  if (!adapter->sge) {    in t1_init_sw_modules()
common.h:
    187   struct sge_params sge;    [member]
    226   struct sge;
    244   struct sge *sge;    [member]
/drivers/infiniband/hw/ipath/
ipath_keys.c:
    122   struct ib_sge *sge, int acc)    in ipath_lkey_ok() [argument]
    134   if (sge->lkey == 0) {    in ipath_lkey_ok()
    143   isge->vaddr = (void *) sge->addr;    in ipath_lkey_ok()
    144   isge->length = sge->length;    in ipath_lkey_ok()
    145   isge->sge_length = sge->length;    in ipath_lkey_ok()
    149   mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];    in ipath_lkey_ok()
    150   if (unlikely(mr == NULL || mr->lkey != sge->lkey ||    in ipath_lkey_ok()
    156   off = sge->addr - mr->user_base;    in ipath_lkey_ok()
    157   if (unlikely(sge->addr < mr->user_base ||    in ipath_lkey_ok()
    158   off + sge->length > mr->length ||    in ipath_lkey_ok()
    [all …]
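In ipath_lkey_ok(), an lkey of 0 short-circuits the lookup (it denotes kernel memory); otherwise the key's high bits index the region table and the SGE must land entirely inside the region it names. A simplified userspace model of the range check; the explicit wrap-around guard is an addition for the sketch, and the field names are abbreviated:

    #include <stddef.h>
    #include <stdint.h>

    struct mr {
        uint32_t lkey;       /* key this region was registered under */
        uint64_t user_base;  /* start of the registered region */
        uint64_t length;     /* size of the registered region */
    };

    /* Return 1 iff [addr, addr + len) lies inside the region. */
    static int sge_in_region(const struct mr *mr, uint32_t lkey,
                             uint64_t addr, uint64_t len)
    {
        uint64_t off;

        if (mr == NULL || mr->lkey != lkey)
            return 0;                   /* stale or mismatched key */
        off = addr - mr->user_base;     /* offset into the region */
        if (addr < mr->user_base ||     /* starts before the region */
            off + len > mr->length ||   /* runs past the end */
            off + len < off)            /* wrap-around guard (added) */
            return 0;
        return 1;
    }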
ipath_verbs.c:
    173   struct ipath_sge *sge = &ss->sge;    in ipath_copy_sge() [local]
    176   u32 len = sge->length;    in ipath_copy_sge()
    180   if (len > sge->sge_length)    in ipath_copy_sge()
    181   len = sge->sge_length;    in ipath_copy_sge()
    183   memcpy(sge->vaddr, data, len);    in ipath_copy_sge()
    184   sge->vaddr += len;    in ipath_copy_sge()
    185   sge->length -= len;    in ipath_copy_sge()
    186   sge->sge_length -= len;    in ipath_copy_sge()
    187   if (sge->sge_length == 0) {    in ipath_copy_sge()
    189   *sge = *ss->sg_list++;    in ipath_copy_sge()
    [all …]
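ipath_copy_sge() is the canonical SGE-walking copy loop: clamp each chunk to what remains of the current element, copy, advance vaddr and the two length counters, and pop the next element off sg_list once the element drains. A condensed userspace model; it assumes each element maps one contiguous segment (so length and sge_length drain together), which hides the driver's extra per-segment bookkeeping:

    #include <stdint.h>
    #include <string.h>

    struct sge {
        void *vaddr;
        uint32_t length;       /* bytes left in the current segment */
        uint32_t sge_length;   /* bytes left in this SGE as posted */
    };

    struct sge_state {
        struct sge sge;        /* the SGE currently being consumed */
        struct sge *sg_list;   /* remaining SGEs */
    };

    static void copy_sge(struct sge_state *ss, const void *data,
                         uint32_t length)
    {
        struct sge *sge = &ss->sge;

        while (length) {
            uint32_t len = sge->length;

            if (len > length)
                len = length;
            if (len > sge->sge_length)
                len = sge->sge_length;
            memcpy(sge->vaddr, data, len);
            sge->vaddr = (char *)sge->vaddr + len;
            sge->length -= len;
            sge->sge_length -= len;
            if (sge->sge_length == 0)
                *sge = *ss->sg_list++;   /* move to the next element */
            data = (const char *)data + len;
            length -= len;
        }
    }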
ipath_ruc.c:
    134   if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,    in ipath_init_sge()
    265   struct ipath_sge *sge;    in ipath_ruc_loopback() [local]
    328   sqp->s_sge.sge = wqe->sg_list[0];    in ipath_ruc_loopback()
    370   qp->r_sge.sge = wqe->sg_list[0];    in ipath_ruc_loopback()
    385   maddr = (atomic64_t *) qp->r_sge.sge.vaddr;    in ipath_ruc_loopback()
    387   *(u64 *) sqp->s_sge.sge.vaddr =    in ipath_ruc_loopback()
    390   (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,    in ipath_ruc_loopback()
    399   sge = &sqp->s_sge.sge;    in ipath_ruc_loopback()
    403   if (len > sge->length)    in ipath_ruc_loopback()
    404   len = sge->length;    in ipath_ruc_loopback()
    [all …]
ipath_ud.c:
    59    struct ipath_sge *sge;    in ipath_ud_loopback() [local]
    184   sge = swqe->sg_list;    in ipath_ud_loopback()
    186   u32 len = sge->length;    in ipath_ud_loopback()
    190   if (len > sge->sge_length)    in ipath_ud_loopback()
    191   len = sge->sge_length;    in ipath_ud_loopback()
    193   ipath_copy_sge(&rsge, sge->vaddr, len);    in ipath_ud_loopback()
    194   sge->vaddr += len;    in ipath_ud_loopback()
    195   sge->length -= len;    in ipath_ud_loopback()
    196   sge->sge_length -= len;    in ipath_ud_loopback()
    197   if (sge->sge_length == 0) {    in ipath_ud_loopback()
    [all …]
ipath_sdma.c:
    670   struct ipath_sge *sge;    in ipath_sdma_verbs_send() [local]
    729   sge = &ss->sge;    in ipath_sdma_verbs_send()
    735   if (len > sge->length)    in ipath_sdma_verbs_send()
    736   len = sge->length;    in ipath_sdma_verbs_send()
    737   if (len > sge->sge_length)    in ipath_sdma_verbs_send()
    738   len = sge->sge_length;    in ipath_sdma_verbs_send()
    741   addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,    in ipath_sdma_verbs_send()
    759   sge->vaddr += len;    in ipath_sdma_verbs_send()
    760   sge->length -= len;    in ipath_sdma_verbs_send()
    761   sge->sge_length -= len;    in ipath_sdma_verbs_send()
    [all …]
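ipath_sdma_verbs_send() walks the same SGE state, but maps each fragment for device access instead of memcpy'ing it; the fragment length travels in 32-bit dwords, hence `dw << 2` bytes. A kernel-style sketch of just the mapping step, using the real dma_map_single()/dma_mapping_error() API; the helper itself is hypothetical, and the caller is assumed to unwind earlier mappings on failure:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map one SGE fragment of 'dw' dwords for device reads. */
    static int map_fragment(struct device *dev, void *vaddr, u16 dw,
                            dma_addr_t *addr)
    {
        *addr = dma_map_single(dev, vaddr, dw << 2, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *addr))
            return -ENOMEM;   /* caller unwinds prior fragments */
        return 0;
    }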
/drivers/infiniband/hw/qib/
qib_verbs.c:
    170   struct qib_sge *sge = &ss->sge;    in qib_copy_sge() [local]
    173   u32 len = sge->length;    in qib_copy_sge()
    177   if (len > sge->sge_length)    in qib_copy_sge()
    178   len = sge->sge_length;    in qib_copy_sge()
    180   memcpy(sge->vaddr, data, len);    in qib_copy_sge()
    181   sge->vaddr += len;    in qib_copy_sge()
    182   sge->length -= len;    in qib_copy_sge()
    183   sge->sge_length -= len;    in qib_copy_sge()
    184   if (sge->sge_length == 0) {    in qib_copy_sge()
    186   qib_put_mr(sge->mr);    in qib_copy_sge()
    [all …]
qib_keys.c:
    152   struct qib_sge *isge, struct ib_sge *sge, int acc)    in qib_lkey_ok() [argument]
    163   if (sge->lkey == 0) {    in qib_lkey_ok()
    176   isge->vaddr = (void *) sge->addr;    in qib_lkey_ok()
    177   isge->length = sge->length;    in qib_lkey_ok()
    178   isge->sge_length = sge->length;    in qib_lkey_ok()
    184   rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);    in qib_lkey_ok()
    185   if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))    in qib_lkey_ok()
    188   off = sge->addr - mr->user_base;    in qib_lkey_ok()
    189   if (unlikely(sge->addr < mr->user_base ||    in qib_lkey_ok()
    190   off + sge->length > mr->length ||    in qib_lkey_ok()
    [all …]
qib_ruc.c:
    98    if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,    in qib_init_sge()
    111   struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;    in qib_init_sge() [local]
    113   qib_put_mr(sge->mr);    in qib_init_sge()
    360   struct qib_sge *sge;    in qib_ruc_loopback() [local]
    427   sqp->s_sge.sge = wqe->sg_list[0];    in qib_ruc_loopback()
    460   if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,    in qib_ruc_loopback()
    473   if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,    in qib_ruc_loopback()
    481   qp->r_sge.sge = wqe->sg_list[0];    in qib_ruc_loopback()
    491   if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),    in qib_ruc_loopback()
    497   maddr = (atomic64_t *) qp->r_sge.sge.vaddr;    in qib_ruc_loopback()
    [all …]
qib_ud.c:
    57    struct qib_sge *sge;    in qib_ud_loopback() [local]
    166   ssge.sge = *swqe->sg_list;    in qib_ud_loopback()
    168   sge = &ssge.sge;    in qib_ud_loopback()
    170   u32 len = sge->length;    in qib_ud_loopback()
    174   if (len > sge->sge_length)    in qib_ud_loopback()
    175   len = sge->sge_length;    in qib_ud_loopback()
    177   qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);    in qib_ud_loopback()
    178   sge->vaddr += len;    in qib_ud_loopback()
    179   sge->length -= len;    in qib_ud_loopback()
    180   sge->sge_length -= len;    in qib_ud_loopback()
    [all …]
qib_sdma.c:
    532   struct qib_sge *sge;    in qib_sdma_verbs_send() [local]
    580   sge = &ss->sge;    in qib_sdma_verbs_send()
    586   if (len > sge->length)    in qib_sdma_verbs_send()
    587   len = sge->length;    in qib_sdma_verbs_send()
    588   if (len > sge->sge_length)    in qib_sdma_verbs_send()
    589   len = sge->sge_length;    in qib_sdma_verbs_send()
    592   addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,    in qib_sdma_verbs_send()
    611   sge->vaddr += len;    in qib_sdma_verbs_send()
    612   sge->length -= len;    in qib_sdma_verbs_send()
    613   sge->sge_length -= len;    in qib_sdma_verbs_send()
    [all …]
/drivers/scsi/csiostor/
csio_wr.c:
    57    csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)    in csio_get_flbuf_size() [argument]
    59    sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +    in csio_get_flbuf_size()
    65    csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)    in csio_wr_fl_bufsz() [argument]
    67    return sge->sge_fl_buf_size[buf->paddr & 0xF];    in csio_wr_fl_bufsz()
    74    return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;    in csio_wr_qstat_pgsz()
    117   struct csio_sge *sge = &wrm->sge;    in csio_wr_fill_fl() [local]
    125   buf->len = sge->sge_fl_buf_size[sreg];    in csio_wr_fill_fl()
    1050  struct csio_sge *sge = &wrm->sge;    in csio_wr_process_fl() [local]
    1076  bufsz = csio_wr_fl_bufsz(sge, buf);    in csio_wr_process_fl()
    1097  flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);    in csio_wr_process_fl()
    [all …]
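csio_wr_fl_bufsz() looks up a buffer's size with `buf->paddr & 0xF`: free-list buffers are aligned well past 16 bytes, so the low four bits of the bus address are free to carry the index of the SGE_FL_BUFFER_SIZE register that sized the buffer. An illustrative model of that low-bit tagging (helper names invented):

    #include <assert.h>
    #include <stdint.h>

    /* Stash a 4-bit size-register index in the low bits of an
     * aligned bus address. */
    static inline uint64_t tag_paddr(uint64_t paddr, unsigned int sreg)
    {
        assert((paddr & 0xF) == 0 && sreg < 16);
        return paddr | sreg;
    }

    static inline unsigned int paddr_sreg(uint64_t tagged)
    {
        return tagged & 0xF;        /* which size register applies */
    }

    static inline uint64_t paddr_addr(uint64_t tagged)
    {
        return tagged & ~0xFULL;    /* the real bus address */
    }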
/drivers/net/ethernet/chelsio/cxgb4/
sge.c:
    159   struct sge *s = &adapter->sge;    in fl_mtu_bufsize()
    314   for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {    in unmap_sgl()
    429   struct sge *s = &adapter->sge;    in get_buf_size()
    545   struct sge *s = &adap->sge;    in refill_fl()
    622   set_bit(q->cntxt_id - adap->sge.egr_start,    in refill_fl()
    623   adap->sge.starving_fl);    in refill_fl()
    793   to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;    in write_sgl()
    807   unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;    in write_sgl()
    810   memcpy(sgl->sge, buf, part0);    in write_sgl()
    998   q = &adap->sge.ethtxq[qidx + pi->first_qset];    in t4_eth_xmit()
    [all …]
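The write_sgl() matches show the descriptor ring's wrap handling: `q->stat` marks the end of the usable ring, and an SGL that would run past it is written in two pieces, `part0` bytes at the tail and `part1` bytes restarting at the ring base. A userspace model of the split copy (buffer handling simplified):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Copy 'len' bytes into a ring at 'dst', wrapping to 'ring_start'
     * if the data would cross 'stat' (the end of the usable ring). */
    static void write_wrapped(uint8_t *ring_start, uint8_t *stat,
                              uint8_t *dst, const uint8_t *src, size_t len)
    {
        if (dst + len <= stat) {
            memcpy(dst, src, len);       /* fits without wrapping */
        } else {
            size_t part0 = stat - dst;   /* bytes before the end */
            size_t part1 = len - part0;  /* bytes after the wrap */

            memcpy(dst, src, part0);
            memcpy(ring_start, src + part0, part1);
        }
    }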
cxgb4_main.c:
    668   txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];    in fwevtq_handler()
    670   if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {    in fwevtq_handler()
    786   for_each_ofldrxq(&adap->sge, i)    in name_msix_vecs()
    790   for_each_rdmarxq(&adap->sge, i)    in name_msix_vecs()
    797   struct sge *s = &adap->sge;    in request_msix_queue_irqs()
    851   struct sge *s = &adap->sge;    in free_msix_queue_irqs()
    874   const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];    in write_rss()
    913   static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)    in rxq_to_chan()
    926   for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {    in quiesce_rx()
    927   struct sge_rspq *q = adap->sge.ingr_map[i];    in quiesce_rx()
    [all …]
cxgb4.h:
    471   struct sge {    [struct]
    510   #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)    [argument]
    511   #define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)    [argument]
    512   #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)    [argument]
    568   struct sge sge;    [member]
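The for_each_*rxq() macros expand to plain for-loops bounded by the queue counts kept in `struct sge`; cxgb4_main.c uses them in name_msix_vecs() above. A self-contained usage sketch, with the struct cut down to the one field the macro reads:

    #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)

    struct sge {
        int ethqsets;   /* number of Ethernet RX queue sets */
    };

    static int count_eth_rxqs(const struct sge *s)
    {
        int i, n = 0;

        for_each_ethrxq(s, i)
            n++;        /* visit each queue set in turn */
        return n;
    }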
/drivers/net/ethernet/chelsio/cxgb4vf/
cxgb4vf_main.c:
    286   struct sge *s = &adapter->sge;    in request_msix_queue_irqs()
    324   struct sge *s = &adapter->sge;    in free_msix_queue_irqs()
    357   struct sge *s = &adapter->sge;    in enable_rx()
    380   struct sge *s = &adapter->sge;    in quiesce_rx()
    438   struct sge *s = &adapter->sge;    in fwevtq_handler()
    494   struct sge *s = &adapter->sge;    in setup_sge_queues()
    609   struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];    in setup_rss()
    949   static int closest_timer(const struct sge *s, int us)    in closest_timer()
    965   static int closest_thres(const struct sge *s, int thres)    in closest_thres()
    990   ? adapter->sge.timer_val[timer_idx]    in qtimer_val()
    [all …]
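closest_timer() and closest_thres() exist because the hardware supports only a small table of interrupt holdoff timers and packet-count thresholds; a requested value is mapped to the nearest supported entry. A generic sketch of that nearest-entry scan:

    #include <limits.h>
    #include <stdlib.h>

    /* Return the index of the table entry nearest to 'want'. */
    static int closest_value(const int *table, int n, int want)
    {
        int i, delta, best = 0, min_delta = INT_MAX;

        for (i = 0; i < n; i++) {
            delta = abs(table[i] - want);
            if (delta < min_delta) {
                min_delta = delta;
                best = i;
            }
        }
        return best;
    }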
sge.c:
    334   for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {    in unmap_sgl()
    698   set_bit(fl->cntxt_id, adapter->sge.starving_fl);    in refill_fl()
    918   to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;    in write_sgl()
    932   unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;    in write_sgl()
    935   memcpy(sgl->sge, buf, part0);    in write_sgl()
    1110  txq = &adapter->sge.ethtxq[pi->first_qset + qidx];    in t4vf_eth_xmit()
    1814  struct sge *s = &adapter->sge;    in process_intrq()
    1818  spin_lock(&adapter->sge.intrq_lock);    in process_intrq()
    1887  spin_unlock(&adapter->sge.intrq_lock);    in process_intrq()
    1934  struct sge *s = &adapter->sge;    in sge_rx_timer_cb()
    [all …]
adapter.h:
    260   struct sge {    [struct]
    331   #define for_each_ethrxq(sge, iter) \    [argument]
    332   for (iter = 0; iter < (sge)->ethqsets; iter++)
    355   struct sge sge;    [member]
/drivers/scsi/be2iscsi/
be_mgmt.c:
    245   struct be_sge *sge;    in mgmt_get_session_info() [local]
    262   sge = nonembedded_sgl(wrb);    in mgmt_get_session_info()
    272   sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));    in mgmt_get_session_info()
    273   sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);    in mgmt_get_session_info()
    274   sge->len = cpu_to_le32(nonemb_cmd->size);    in mgmt_get_session_info()
    331   struct be_sge *sge = nonembedded_sgl(wrb);    in mgmt_check_supported_fw() [local]
    351   sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));    in mgmt_check_supported_fw()
    352   sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);    in mgmt_check_supported_fw()
    353   sge->len = cpu_to_le32(nonemb_cmd.size);    in mgmt_check_supported_fw()
    485   struct be_sge *sge;    in mgmt_invalidate_icds() [local]
    [all …]
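Both mgmt_get_session_info() and mgmt_check_supported_fw() fill the non-embedded SGE the same way: the command buffer's 64-bit DMA address is split into 32-bit pa_hi/pa_lo halves, stored little-endian along with the length. A minimal model of the split; the struct is abbreviated, and the real driver wraps every store in cpu_to_le32():

    #include <stdint.h>

    /* Abbreviated model of the hardware SGE layout. */
    struct be_sge_model {
        uint32_t pa_hi;
        uint32_t pa_lo;
        uint32_t len;
    };

    static void fill_sge(struct be_sge_model *sge, uint64_t dma,
                         uint32_t size)
    {
        sge->pa_hi = (uint32_t)(dma >> 32);         /* upper_32_bits(dma) */
        sge->pa_lo = (uint32_t)(dma & 0xFFFFFFFF);  /* low half */
        sge->len   = size;
    }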
/drivers/ata/
sata_sil24.c:
    257   struct sil24_sge sge[SIL24_MAX_SGE];    [member]
    263   struct sil24_sge sge[SIL24_MAX_SGE];    [member]
    777   struct sil24_sge *sge)    in sil24_fill_sg() [argument]
    784   sge->addr = cpu_to_le64(sg_dma_address(sg));    in sil24_fill_sg()
    785   sge->cnt = cpu_to_le32(sg_dma_len(sg));    in sil24_fill_sg()
    786   sge->flags = 0;    in sil24_fill_sg()
    788   last_sge = sge;    in sil24_fill_sg()
    789   sge++;    in sil24_fill_sg()
    846   struct sil24_sge *sge;    in sil24_qc_prep() [local]
    853   sge = cb->ata.sge;    in sil24_qc_prep()
    [all …]
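sil24_fill_sg() turns each mapped scatterlist segment into a `sil24_sge` and keeps a pointer to the most recent one so that, after the loop, the final entry can be flagged as the end of the chain (the driver sets its SGE_TRM bit there). A userspace model of that shape, with simplified types and the flag value chosen for illustration:

    #include <stddef.h>
    #include <stdint.h>

    #define SGE_TRM (1u << 31)   /* "terminate": last SGE in the chain */

    struct hw_sge {
        uint64_t addr;
        uint32_t cnt;
        uint32_t flags;
    };

    struct dma_seg {
        uint64_t addr;
        uint32_t len;
    };

    static void fill_sg(struct hw_sge *sge, const struct dma_seg *seg,
                        int nseg)
    {
        struct hw_sge *last_sge = NULL;
        int i;

        for (i = 0; i < nseg; i++) {
            sge->addr = seg[i].addr;
            sge->cnt = seg[i].len;
            sge->flags = 0;
            last_sge = sge;      /* remember the most recent entry */
            sge++;
        }
        if (last_sge)
            last_sge->flags = SGE_TRM;   /* mark end of chain */
    }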
/drivers/net/ethernet/chelsio/cxgb3/
sge.c:
    679   spin_lock_irq(&adapter->sge.reg_lock);    in t3_free_qset()
    681   spin_unlock_irq(&adapter->sge.reg_lock);    in t3_free_qset()
    692   spin_lock_irq(&adapter->sge.reg_lock);    in t3_free_qset()
    694   spin_unlock_irq(&adapter->sge.reg_lock);    in t3_free_qset()
    708   spin_lock_irq(&adapter->sge.reg_lock);    in t3_free_qset()
    710   spin_unlock_irq(&adapter->sge.reg_lock);    in t3_free_qset()
    1516  ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);    in t3_mgmt_tx()
    1770  struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];    in t3_offload_tx()
    2029  skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);    in rx_eth()
    2131  skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);    in lro_add_page()
    [all …]