
Searched for refs:qp (results 1–25 of 147), sorted by relevance.


/drivers/infiniband/hw/qib/
qib_rc.c
57 static void start_timer(struct qib_qp *qp) in start_timer() argument
59 qp->s_flags |= QIB_S_TIMER; in start_timer()
60 qp->s_timer.function = rc_timeout; in start_timer()
62 qp->s_timer.expires = jiffies + qp->timeout_jiffies; in start_timer()
63 add_timer(&qp->s_timer); in start_timer()
77 static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, in qib_make_rc_ack() argument
87 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) in qib_make_rc_ack()
93 switch (qp->s_ack_state) { in qib_make_rc_ack()
96 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; in qib_make_rc_ack()
108 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) in qib_make_rc_ack()
[all …]
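
The start_timer() lines above are the stock arm-a-one-shot-timer idiom: record the pending state in s_flags, point the timer at its callback, set an absolute expiry in jiffies, and hand it to add_timer(). Below is a minimal userspace sketch of the same flow; the fake_timer struct and the tick loop are invented for illustration (the kernel uses struct timer_list, jiffies and add_timer()):

    #include <stdio.h>

    struct fake_timer {
        void (*function)(unsigned long); /* callback, like timer->function */
        unsigned long data;              /* callback argument              */
        unsigned long expires;           /* absolute tick to fire at       */
        int pending;                     /* set on add, cleared on fire    */
    };

    static unsigned long jiffies;        /* fake tick counter */

    static void rc_timeout(unsigned long data)
    {
        printf("timeout fired for qp %lu at tick %lu\n", data, jiffies);
    }

    static void fake_add_timer(struct fake_timer *t)
    {
        t->pending = 1;
    }

    int main(void)
    {
        struct fake_timer t = { 0 };

        /* Mirrors start_timer(): install callback, compute expiry, arm. */
        t.function = rc_timeout;
        t.data = 42;                     /* stands in for the qp pointer */
        t.expires = jiffies + 5;         /* like jiffies + qp->timeout_jiffies */
        fake_add_timer(&t);

        /* Drive the fake clock and fire the timer once it expires. */
        for (jiffies = 0; jiffies <= 10; jiffies++) {
            if (t.pending && jiffies >= t.expires) {
                t.pending = 0;
                t.function(t.data);
            }
        }
        return 0;
    }
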
qib_qp.c
219 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) in insert_qp() argument
221 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in insert_qp()
223 unsigned n = qpn_hash(dev, qp->ibqp.qp_num); in insert_qp()
226 atomic_inc(&qp->refcount); in insert_qp()
228 if (qp->ibqp.qp_num == 0) in insert_qp()
229 rcu_assign_pointer(ibp->qp0, qp); in insert_qp()
230 else if (qp->ibqp.qp_num == 1) in insert_qp()
231 rcu_assign_pointer(ibp->qp1, qp); in insert_qp()
233 qp->next = dev->qp_table[n]; in insert_qp()
234 rcu_assign_pointer(dev->qp_table[n], qp); in insert_qp()
[all …]
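
insert_qp() above publishes a new QP into an RCU-protected hash table: take a reference, link the node into its bucket, then make it visible with rcu_assign_pointer(), whose release semantics keep readers from ever seeing a half-initialized node. Here is a userspace sketch of just the publish side, modeling rcu_assign_pointer() with a C11 release store; qp_node and the table size are invented:

    #include <stdatomic.h>
    #include <stdio.h>

    #define TABLE_SIZE 8

    struct qp_node {
        unsigned qp_num;
        struct qp_node *next;            /* hash-chain link, like qp->next */
    };

    static _Atomic(struct qp_node *) qp_table[TABLE_SIZE];

    static void publish_qp(struct qp_node *qp)
    {
        unsigned n = qp->qp_num % TABLE_SIZE; /* stands in for qpn_hash() */

        /* Link into the chain first, then publish with release semantics
         * (the role rcu_assign_pointer() plays in insert_qp()). */
        qp->next = atomic_load_explicit(&qp_table[n], memory_order_relaxed);
        atomic_store_explicit(&qp_table[n], qp, memory_order_release);
    }

    int main(void)
    {
        static struct qp_node a = { .qp_num = 5 }, b = { .qp_num = 13 };

        publish_qp(&a);
        publish_qp(&b);                  /* 13 % 8 == 5: same bucket as a */

        for (struct qp_node *q = atomic_load_explicit(&qp_table[5],
                                                      memory_order_acquire);
             q; q = q->next)
            printf("bucket 5: qp_num=%u\n", q->qp_num);
        return 0;
    }
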
qib_uc.c
46 int qib_make_uc_req(struct qib_qp *qp) in qib_make_uc_req() argument
54 u32 pmtu = qp->pmtu; in qib_make_uc_req()
57 spin_lock_irqsave(&qp->s_lock, flags); in qib_make_uc_req()
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { in qib_make_uc_req()
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) in qib_make_uc_req()
63 if (qp->s_last == qp->s_head) in qib_make_uc_req()
66 if (atomic_read(&qp->s_dma_busy)) { in qib_make_uc_req()
67 qp->s_flags |= QIB_S_WAIT_DMA; in qib_make_uc_req()
70 wqe = get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
[all …]
qib_ruc.c
81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) in qib_init_sge() argument
89 rkt = &to_idev(qp->ibqp.device)->lk_table; in qib_init_sge()
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); in qib_init_sge()
91 ss = &qp->r_sge; in qib_init_sge()
92 ss->sg_list = qp->r_sg_list; in qib_init_sge()
93 qp->r_len = 0; in qib_init_sge()
101 qp->r_len += wqe->sg_list[i].length; in qib_init_sge()
105 ss->total_len = qp->r_len; in qib_init_sge()
120 wc.qp = &qp->ibqp; in qib_init_sge()
122 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in qib_init_sge()
[all …]
qib_ud.c
53 struct qib_qp *qp; in qib_ud_loopback() local
61 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); in qib_ud_loopback()
62 if (!qp) { in qib_ud_loopback()
66 if (qp->ibqp.qp_type != sqp->ibqp.qp_type || in qib_ud_loopback()
67 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { in qib_ud_loopback()
75 if (qp->ibqp.qp_num > 1) { in qib_ud_loopback()
81 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); in qib_ud_loopback()
87 sqp->ibqp.qp_num, qp->ibqp.qp_num, in qib_ud_loopback()
99 if (qp->ibqp.qp_num) { in qib_ud_loopback()
104 if (unlikely(qkey != qp->qkey)) { in qib_ud_loopback()
[all …]
qib_verbs.c
336 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, in qib_post_one_send() argument
349 spin_lock_irqsave(&qp->s_lock, flags); in qib_post_one_send()
352 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) in qib_post_one_send()
356 if (wr->num_sge > qp->s_max_sge) in qib_post_one_send()
365 if (qib_fast_reg_mr(qp, wr)) in qib_post_one_send()
367 } else if (qp->ibqp.qp_type == IB_QPT_UC) { in qib_post_one_send()
370 } else if (qp->ibqp.qp_type != IB_QPT_RC) { in qib_post_one_send()
376 if (qp->ibqp.pd != wr->wr.ud.ah->pd) in qib_post_one_send()
385 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) in qib_post_one_send()
388 next = qp->s_head + 1; in qib_post_one_send()
[all …]
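
qib_post_one_send() above ends by claiming a slot on the circular send queue: next = s_head + 1, wrap at the end of the ring, and reject the post if next would catch up with s_last. A self-contained sketch of that full-queue test follows, with the struct and queue size invented:

    #include <stdio.h>

    #define SQ_SIZE 4

    struct send_queue {
        unsigned s_head;                 /* next free slot (producer)       */
        unsigned s_last;                 /* oldest pending entry (consumer) */
    };

    static int post_one(struct send_queue *sq, int wr_id)
    {
        unsigned next = sq->s_head + 1;

        if (next >= SQ_SIZE)
            next = 0;                    /* wrap around the ring */
        if (next == sq->s_last)
            return -1;                   /* full: would overwrite s_last */

        printf("posted wr %d in slot %u\n", wr_id, sq->s_head);
        sq->s_head = next;
        return 0;
    }

    int main(void)
    {
        struct send_queue sq = { 0, 0 };

        for (int i = 0; i < 5; i++)
            if (post_one(&sq, i) < 0)
                printf("wr %d rejected: send queue full\n", i);
        return 0;
    }
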
/drivers/infiniband/hw/ipath/
ipath_rc.c
62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) in ipath_init_restart() argument
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, in ipath_init_restart()
67 ib_mtu_enum_to_int(qp->path_mtu)); in ipath_init_restart()
68 dev = to_idev(qp->ibqp.device); in ipath_init_restart()
70 if (list_empty(&qp->timerwait)) in ipath_init_restart()
71 list_add_tail(&qp->timerwait, in ipath_init_restart()
86 static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp, in ipath_make_rc_ack() argument
96 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) in ipath_make_rc_ack()
102 switch (qp->s_ack_state) { in ipath_make_rc_ack()
111 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) in ipath_make_rc_ack()
[all …]
ipath_qp.c
209 static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, in ipath_alloc_qpn() argument
218 qp->ibqp.qp_num = ret; in ipath_alloc_qpn()
224 qp->next = qpt->table[ret]; in ipath_alloc_qpn()
225 qpt->table[ret] = qp; in ipath_alloc_qpn()
226 atomic_inc(&qp->refcount); in ipath_alloc_qpn()
243 static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) in ipath_free_qp() argument
251 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max]; in ipath_free_qp()
253 if (q == qp) { in ipath_free_qp()
254 *qpp = qp->next; in ipath_free_qp()
255 qp->next = NULL; in ipath_free_qp()
[all …]
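
ipath_free_qp() above unlinks the QP from its hash chain with the pointer-to-pointer idiom: by walking a struct qp **qpp, removal is the single assignment *qpp = qp->next, with no special case for the chain head. A standalone sketch of the idiom, with the struct simplified:

    #include <stdio.h>

    struct qp {
        int qp_num;
        struct qp *next;
    };

    static void free_qp(struct qp **head, struct qp *qp)
    {
        struct qp **qpp = head, *q;

        for (; (q = *qpp) != NULL; qpp = &q->next) {
            if (q == qp) {
                *qpp = qp->next;         /* unlink, head or interior alike */
                qp->next = NULL;
                break;
            }
        }
    }

    int main(void)
    {
        struct qp c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct qp *head = &a;

        free_qp(&head, &b);              /* remove an interior node */
        free_qp(&head, &a);              /* remove the head node    */

        for (struct qp *q = head; q; q = q->next)
            printf("remaining qp_num=%d\n", q->qp_num);
        return 0;
    }
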
ipath_uc.c
46 int ipath_make_uc_req(struct ipath_qp *qp) in ipath_make_uc_req() argument
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); in ipath_make_uc_req()
57 spin_lock_irqsave(&qp->s_lock, flags); in ipath_make_uc_req()
59 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { in ipath_make_uc_req()
60 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) in ipath_make_uc_req()
63 if (qp->s_last == qp->s_head) in ipath_make_uc_req()
66 if (atomic_read(&qp->s_dma_busy)) { in ipath_make_uc_req()
67 qp->s_flags |= IPATH_S_WAIT_DMA; in ipath_make_uc_req()
70 wqe = get_swqe_ptr(qp, qp->s_last); in ipath_make_uc_req()
71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in ipath_make_uc_req()
[all …]
ipath_ruc.c
87 void ipath_insert_rnr_queue(struct ipath_qp *qp) in ipath_insert_rnr_queue() argument
89 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); in ipath_insert_rnr_queue()
94 list_add(&qp->timerwait, &dev->rnrwait); in ipath_insert_rnr_queue()
100 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) { in ipath_insert_rnr_queue()
101 qp->s_rnr_timeout -= nqp->s_rnr_timeout; in ipath_insert_rnr_queue()
111 nqp->s_rnr_timeout -= qp->s_rnr_timeout; in ipath_insert_rnr_queue()
112 list_add(&qp->timerwait, l); in ipath_insert_rnr_queue()
123 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, in ipath_init_sge() argument
134 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, in ipath_init_sge()
149 wc.qp = &qp->ibqp; in ipath_init_sge()
[all …]
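
ipath_insert_rnr_queue() above keeps RNR waiters in a delta queue: each entry stores its timeout relative to its predecessor, so a timer tick only ever decrements the head entry. Insertion subtracts each predecessor's share while walking, then rebalances the successor. A sketch under those assumptions, using a plain singly linked list in place of the kernel's list_head:

    #include <stdio.h>

    struct waiter {
        int delta;                       /* timeout relative to predecessor */
        struct waiter *next;
    };

    /* Insert with absolute timeout 'timeout', converting to a delta. */
    static void delta_insert(struct waiter **head, struct waiter *w,
                             int timeout)
    {
        struct waiter **pp = head, *n;

        w->delta = timeout;
        while ((n = *pp) != NULL && w->delta >= n->delta) {
            w->delta -= n->delta;        /* consume predecessor's share */
            pp = &n->next;
        }
        if (n)
            n->delta -= w->delta;        /* successor now relative to us */
        w->next = n;
        *pp = w;
    }

    int main(void)
    {
        struct waiter a, b, c, *head = NULL;
        int total = 0;

        delta_insert(&head, &a, 10);
        delta_insert(&head, &b, 4);
        delta_insert(&head, &c, 7);

        for (struct waiter *w = head; w; w = w->next) {
            total += w->delta;
            printf("delta=%d (absolute %d)\n", w->delta, total);
        }
        return 0;
    }
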
ipath_ud.c
53 struct ipath_qp *qp; in ipath_ud_loopback() local
68 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn); in ipath_ud_loopback()
69 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { in ipath_ud_loopback()
79 if (unlikely(qp->ibqp.qp_num && in ipath_ud_loopback()
81 sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) { in ipath_ud_loopback()
107 if (qp->ibqp.srq) { in ipath_ud_loopback()
108 srq = to_isrq(qp->ibqp.srq); in ipath_ud_loopback()
114 rq = &qp->r_rq; in ipath_ud_loopback()
134 rsge.sg_list = qp->r_ud_sg_list; in ipath_ud_loopback()
135 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { in ipath_ud_loopback()
[all …]
ipath_verbs.c
336 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) in ipath_post_one_send() argument
345 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; in ipath_post_one_send()
347 spin_lock_irqsave(&qp->s_lock, flags); in ipath_post_one_send()
349 if (qp->ibqp.qp_type != IB_QPT_SMI && in ipath_post_one_send()
356 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) in ipath_post_one_send()
360 if (wr->num_sge > qp->s_max_sge) in ipath_post_one_send()
368 if (qp->ibqp.qp_type == IB_QPT_UC) { in ipath_post_one_send()
371 } else if (qp->ibqp.qp_type == IB_QPT_UD) { in ipath_post_one_send()
377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) in ipath_post_one_send()
386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) in ipath_post_one_send()
[all …]
/drivers/ntb/
ntb_transport.c
97 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
106 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
186 #define QP_TO_MW(qp) ((qp) % NTB_NUM_MW) argument
381 struct ntb_transport_qp *qp; in debugfs_read() local
391 qp = filp->private_data; in debugfs_read()
396 "rx_bytes - \t%llu\n", qp->rx_bytes); in debugfs_read()
398 "rx_pkts - \t%llu\n", qp->rx_pkts); in debugfs_read()
400 "rx_ring_empty - %llu\n", qp->rx_ring_empty); in debugfs_read()
402 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); in debugfs_read()
404 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); in debugfs_read()
[all …]
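
The ntb_transport entry above shows the transport's completion interface: a client installs tx_handler/rx_handler function pointers per queue pair, and the transport calls them back with the client's qp_data when work completes. A minimal sketch of that handler-struct pattern follows; fake_qp and every name here are invented, not the ntb_transport API:

    #include <stdio.h>

    struct fake_qp;                      /* opaque to the client */

    struct fake_qp_handlers {
        void (*tx_handler)(struct fake_qp *qp, void *qp_data,
                           void *data, int len);
        void (*rx_handler)(struct fake_qp *qp, void *qp_data,
                           void *data, int len);
    };

    struct fake_qp {
        struct fake_qp_handlers ops;
        void *client_data;               /* handed back as qp_data */
    };

    static void my_rx(struct fake_qp *qp, void *qp_data, void *data, int len)
    {
        (void)qp; (void)qp_data;
        printf("rx completion: %.*s\n", len, (char *)data);
    }

    int main(void)
    {
        struct fake_qp qp = { .ops = { .rx_handler = my_rx } };
        char payload[] = "hello";

        /* The transport side would make this call when a receive completes. */
        if (qp.ops.rx_handler)
            qp.ops.rx_handler(&qp, qp.client_data, payload, 5);
        return 0;
    }
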
/drivers/infiniband/hw/mthca/
mthca_qp.c
195 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) in is_sqp() argument
197 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
198 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
201 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) in is_qp0() argument
203 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
204 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
207 static void *get_recv_wqe(struct mthca_qp *qp, int n) in get_recv_wqe() argument
209 if (qp->is_direct) in get_recv_wqe()
210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe()
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
[all …]
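
get_recv_wqe() above turns a WQE index into an address: a direct queue is base + (n << wqe_shift), while a chunked queue splits that same byte offset into a page-list index and an offset within the page. The arithmetic in isolation, with the shift values chosen arbitrarily:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    int main(void)
    {
        unsigned wqe_shift = 6;          /* 64-byte WQEs */
        unsigned n = 100;                /* WQE index    */
        unsigned long off = (unsigned long)n << wqe_shift;

        /* Direct case: one flat buffer, the offset is the whole story. */
        printf("direct: offset %lu\n", off);

        /* Indirect case: same offset, decomposed page-list style. */
        printf("indirect: page %lu, offset-in-page %lu\n",
               off >> PAGE_SHIFT, off & PAGE_MASK);
        return 0;
    }
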
/drivers/infiniband/hw/mlx4/
qp.c
75 struct mlx4_ib_qp qp; member
112 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
115 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
120 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
121 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
125 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
132 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
133 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
139 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
140 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
[all …]
/drivers/infiniband/hw/amso1100/
c2_qp.c
120 void c2_set_qp_state(struct c2_qp *qp, int c2_state) in c2_set_qp_state() argument
126 qp, in c2_set_qp_state()
127 to_ib_state_str(qp->state), in c2_set_qp_state()
129 qp->state = new_state; in c2_set_qp_state()
134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, in c2_qp_modify() argument
146 qp, in c2_qp_modify()
147 to_ib_state_str(qp->state), in c2_qp_modify()
157 wr.qp_handle = qp->adapter_handle; in c2_qp_modify()
173 spin_lock_irqsave(&qp->lock, flags); in c2_qp_modify()
174 if (qp->cm_id && qp->state == IB_QPS_RTS) { in c2_qp_modify()
[all …]
c2_ae.c
184 struct c2_qp *qp = (struct c2_qp *)resource_user_context; in c2_ae_event() local
185 struct iw_cm_id *cm_id = qp->cm_id; in c2_ae_event()
190 qp); in c2_ae_event()
203 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state)); in c2_ae_event()
218 spin_lock_irqsave(&qp->lock, flags); in c2_ae_event()
219 if (qp->cm_id) { in c2_ae_event()
220 qp->cm_id->rem_ref(qp->cm_id); in c2_ae_event()
221 qp->cm_id = NULL; in c2_ae_event()
223 spin_unlock_irqrestore(&qp->lock, flags); in c2_ae_event()
233 ib_event.element.qp = &qp->ibqp; in c2_ae_event()
[all …]
/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
818 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) in ocrdma_add_qpn_map() argument
822 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { in ocrdma_add_qpn_map()
823 dev->qp_tbl[qp->id] = qp; in ocrdma_add_qpn_map()
829 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) in ocrdma_del_qpn_map() argument
831 dev->qp_tbl[qp->id] = NULL; in ocrdma_del_qpn_map()
907 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, in ocrdma_copy_qp_uresp() argument
914 struct ocrdma_dev *dev = qp->dev; in ocrdma_copy_qp_uresp()
915 struct ocrdma_pd *pd = qp->pd; in ocrdma_copy_qp_uresp()
920 uresp.qp_id = qp->id; in ocrdma_copy_qp_uresp()
921 uresp.sq_dbid = qp->sq.dbid; in ocrdma_copy_qp_uresp()
[all …]
ocrdma_hw.c
663 struct ocrdma_qp *qp) in ocrdma_process_qpcat_error() argument
668 if (qp == NULL) in ocrdma_process_qpcat_error()
670 ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps); in ocrdma_process_qpcat_error()
676 struct ocrdma_qp *qp = NULL; in ocrdma_dispatch_ibevent() local
687 qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK]; in ocrdma_dispatch_ibevent()
705 ib_evt.element.qp = &qp->ibqp; in ocrdma_dispatch_ibevent()
707 ocrdma_process_qpcat_error(dev, qp); in ocrdma_dispatch_ibevent()
710 ib_evt.element.qp = &qp->ibqp; in ocrdma_dispatch_ibevent()
714 ib_evt.element.qp = &qp->ibqp; in ocrdma_dispatch_ibevent()
718 ib_evt.element.qp = &qp->ibqp; in ocrdma_dispatch_ibevent()
[all …]
/drivers/scsi/bnx2i/
bnx2i_hwi.c
151 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; in bnx2i_arm_cq_event_coalescing()
168 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; in bnx2i_arm_cq_event_coalescing()
169 if (cq_index > ep->qp.cqe_size * 2) in bnx2i_arm_cq_event_coalescing()
170 cq_index -= ep->qp.cqe_size * 2; in bnx2i_arm_cq_event_coalescing()
193 if (!bnx2i_conn->ep->qp.rqe_left) in bnx2i_get_rq_buf()
196 bnx2i_conn->ep->qp.rqe_left--; in bnx2i_get_rq_buf()
197 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); in bnx2i_get_rq_buf()
198 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { in bnx2i_get_rq_buf()
199 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; in bnx2i_get_rq_buf()
200 bnx2i_conn->ep->qp.rq_cons_idx = 0; in bnx2i_get_rq_buf()
[all …]
/drivers/infiniband/core/
verbs.c
348 struct ib_qp *qp = context; in __ib_shared_qp_event_handler() local
350 list_for_each_entry(event->element.qp, &qp->open_list, open_list) in __ib_shared_qp_event_handler()
351 if (event->element.qp->event_handler) in __ib_shared_qp_event_handler()
352 event->element.qp->event_handler(event, event->element.qp->qp_context); in __ib_shared_qp_event_handler()
355 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) in __ib_insert_xrcd_qp() argument
358 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); in __ib_insert_xrcd_qp()
366 struct ib_qp *qp; in __ib_open_qp() local
369 qp = kzalloc(sizeof *qp, GFP_KERNEL); in __ib_open_qp()
370 if (!qp) in __ib_open_qp()
373 qp->real_qp = real_qp; in __ib_open_qp()
[all …]
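
__ib_shared_qp_event_handler() above fans one async event out to every open handle of a shared XRC QP, invoking each handle's handler with that handle's own context; __ib_open_qp() is what creates those lightweight handles around real_qp. A sketch of the fan-out, with an array standing in for open_list and all names invented:

    #include <stdio.h>

    struct handle;

    struct event {
        int type;
        struct handle *element;          /* filled in per delivery */
    };

    struct handle {
        void (*event_handler)(struct event *ev, void *ctx);
        void *qp_context;
    };

    static void print_handler(struct event *ev, void *ctx)
    {
        printf("handle %s saw event %d\n", (char *)ctx, ev->type);
    }

    int main(void)
    {
        struct handle open_list[2] = {
            { print_handler, "first"  },
            { print_handler, "second" },
        };
        struct event ev = { .type = 7 };

        /* One shared event, delivered once per open handle. */
        for (int i = 0; i < 2; i++) {
            if (open_list[i].event_handler) {
                ev.element = &open_list[i];
                open_list[i].event_handler(&ev, open_list[i].qp_context);
            }
        }
        return 0;
    }
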
iwcm.c
223 static int iwcm_modify_qp_err(struct ib_qp *qp) in iwcm_modify_qp_err() argument
227 if (!qp) in iwcm_modify_qp_err()
231 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); in iwcm_modify_qp_err()
238 static int iwcm_modify_qp_sqd(struct ib_qp *qp) in iwcm_modify_qp_sqd() argument
242 BUG_ON(qp == NULL); in iwcm_modify_qp_sqd()
244 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); in iwcm_modify_qp_sqd()
264 struct ib_qp *qp = NULL; in iw_cm_disconnect() local
277 if (cm_id_priv->qp) in iw_cm_disconnect()
278 qp = cm_id_priv->qp; in iw_cm_disconnect()
303 if (qp) { in iw_cm_disconnect()
[all …]
/drivers/infiniband/hw/ehca/
ehca_uverbs.c
198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, in ehca_mmap_qp() argument
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num); in ehca_mmap_qp()
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa); in ehca_mmap_qp()
208 ehca_err(qp->ib_qp.device, in ehca_mmap_qp()
210 ret, qp->ib_qp.qp_num); in ehca_mmap_qp()
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num); in ehca_mmap_qp()
217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, in ehca_mmap_qp()
218 &qp->mm_count_rqueue); in ehca_mmap_qp()
220 ehca_err(qp->ib_qp.device, in ehca_mmap_qp()
222 ret, qp->ib_qp.qp_num); in ehca_mmap_qp()
[all …]
/drivers/net/ethernet/mellanox/mlx4/
qp.c
49 struct mlx4_qp *qp; in mlx4_qp_event() local
53 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
54 if (qp) in mlx4_qp_event()
55 atomic_inc(&qp->refcount); in mlx4_qp_event()
59 if (!qp) { in mlx4_qp_event()
64 qp->event(qp, event_type); in mlx4_qp_event()
66 if (atomic_dec_and_test(&qp->refcount)) in mlx4_qp_event()
67 complete(&qp->free); in mlx4_qp_event()
71 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) in is_master_qp0() argument
76 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0()
[all …]
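
mlx4_qp_event() above pins the QP across the event callback with a reference count: atomic_inc() under the lookup lock, atomic_dec_and_test() when done, and complete(&qp->free) on the final drop so a concurrent destroyer can only finish after the last user lets go. A userspace sketch of the put side with C11 atomics; the struct and the freed flag are stand-ins:

    #include <stdatomic.h>
    #include <stdio.h>

    struct qp {
        atomic_int refcount;
        int freed;                       /* stands in for complete(&qp->free) */
    };

    static void qp_put(struct qp *qp)
    {
        /* atomic_fetch_sub returns the old value, so old == 1 means this
         * was the last reference, like atomic_dec_and_test(). */
        if (atomic_fetch_sub(&qp->refcount, 1) == 1) {
            qp->freed = 1;
            printf("last reference dropped, waking destroyer\n");
        }
    }

    int main(void)
    {
        struct qp qp = { .refcount = 1 };  /* destroyer's initial reference */

        atomic_fetch_add(&qp.refcount, 1); /* event handler pins the qp */
        qp_put(&qp);                       /* event handler done        */
        qp_put(&qp);                       /* destroyer's final put     */
        printf("freed=%d\n", qp.freed);
        return 0;
    }
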
/drivers/net/
ntb_netdev.c
65 struct ntb_transport_qp *qp; member
79 ntb_transport_link_query(dev->qp)); in ntb_netdev_event_handler()
88 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, in ntb_netdev_rx_handler() argument
120 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); in ntb_netdev_rx_handler()
128 static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, in ntb_netdev_tx_handler() argument
157 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); in ntb_netdev_start_xmit()
183 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, in ntb_netdev_open()
190 ntb_transport_link_up(dev->qp); in ntb_netdev_open()
195 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) in ntb_netdev_open()
206 ntb_transport_link_down(dev->qp); in ntb_netdev_close()
[all …]
