
Searched refs:ibdev (Results 1 – 25 of 106) sorted by relevance


/drivers/infiniband/hw/mlx4/
main.c
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
134 struct mlx4_ib_dev *ibdev = to_mdev(device); in mlx4_ib_get_netdev() local
138 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); in mlx4_ib_get_netdev()
141 if (mlx4_is_bonded(ibdev->dev)) { in mlx4_ib_get_netdev()
162 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids_v1() argument
167 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1()
195 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids_v1_v2() argument
200 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1_v2()
240 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids() argument
243 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) in mlx4_ib_update_gids()
[all …]
cm.c
135 static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) in gid_from_req_msg() argument
144 id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id) in id_map_find_by_sl_id() argument
146 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; in id_map_find_by_sl_id()
189 static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) in id_map_find_del() argument
191 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; in id_map_find_del()
199 found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id); in id_map_find_del()
206 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) in sl_id_map_add() argument
208 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; in sl_id_map_add()
214 ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id); in sl_id_map_add()
239 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) in id_map_alloc() argument
[all …]
/drivers/infiniband/sw/rdmavt/
vt.c
94 rdi = container_of(_ib_alloc_device(size), struct rvt_dev_info, ibdev); in rvt_alloc_device()
102 ib_dealloc_device(&rdi->ibdev); in rvt_alloc_device()
117 ib_dealloc_device(&rdi->ibdev); in rvt_dealloc_device()
121 static int rvt_query_device(struct ib_device *ibdev, in rvt_query_device() argument
125 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); in rvt_query_device()
156 static int rvt_query_port(struct ib_device *ibdev, u8 port_num, in rvt_query_port() argument
159 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); in rvt_query_port()
161 int port_index = ibport_num_to_idx(ibdev, port_num); in rvt_query_port()
191 static int rvt_modify_port(struct ib_device *ibdev, u8 port_num, in rvt_modify_port() argument
194 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); in rvt_modify_port()
[all …]
ah.c
62 int rvt_check_ah(struct ib_device *ibdev, in rvt_check_ah() argument
68 struct rvt_dev_info *rdi = ib_to_rvt(ibdev); in rvt_check_ah()
72 err = ib_query_port(ibdev, port_num, &port_attr); in rvt_check_ah()
76 port_num > ibdev->phys_port_cnt) in rvt_check_ah()
85 return rdi->driver_f.check_ah(ibdev, ah_attr); in rvt_check_ah()
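The rvt_check_ah() hits above show the sanity check rdmavt applies to an address handle before calling into the driver: the port number must lie within the device's phys_port_cnt, and ib_query_port() must succeed. A minimal sketch of that check follows; the helper name check_ah_port() is illustrative, not a kernel symbol.

#include <rdma/ib_verbs.h>

/* Hedged sketch of the port validation visible in rvt_check_ah(). */
static int check_ah_port(struct ib_device *ibdev, u8 port_num)
{
	struct ib_port_attr port_attr;
	int err;

	/* Port numbers are 1-based; reject anything outside the device. */
	if (port_num == 0 || port_num > ibdev->phys_port_cnt)
		return -EINVAL;

	/* The port must also be queryable before driver-specific checks run. */
	err = ib_query_port(ibdev, port_num, &port_attr);
	if (err)
		return err;

	return 0;
}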
/drivers/infiniband/hw/mlx5/
ib_rep.c
13 struct mlx5_ib_dev *ibdev; in mlx5_ib_set_vport_rep() local
16 ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch); in mlx5_ib_set_vport_rep()
19 ibdev->port[vport_index].rep = rep; in mlx5_ib_set_vport_rep()
20 rep->rep_data[REP_IB].priv = ibdev; in mlx5_ib_set_vport_rep()
21 write_lock(&ibdev->port[vport_index].roce.netdev_lock); in mlx5_ib_set_vport_rep()
22 ibdev->port[vport_index].roce.netdev = in mlx5_ib_set_vport_rep()
24 write_unlock(&ibdev->port[vport_index].roce.netdev_lock); in mlx5_ib_set_vport_rep()
34 struct mlx5_ib_dev *ibdev; in mlx5_ib_vport_rep_load() local
42 ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev); in mlx5_ib_vport_rep_load()
43 if (!ibdev) in mlx5_ib_vport_rep_load()
[all …]
mad.c
77 static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, in process_mad() argument
111 err = mlx5_MAD_IFC(to_mdev(ibdev), in process_mad()
272 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, in mlx5_ib_process_mad() argument
278 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_process_mad()
294 ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, in mlx5_ib_process_mad()
329 int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, in mlx5_query_mad_ifc_smp_attr_node_info() argument
342 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, in mlx5_query_mad_ifc_smp_attr_node_info()
349 int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, in mlx5_query_mad_ifc_system_image_guid() argument
359 err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); in mlx5_query_mad_ifc_system_image_guid()
371 int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, in mlx5_query_mad_ifc_max_pkeys() argument
[all …]
main.c
119 dev = mpi->ibdev; in mlx5_ib_get_ibdev_from_mpi()
146 static int get_port_state(struct ib_device *ibdev, in get_port_state() argument
154 ret = ibdev->ops.query_port(ibdev, port_num, &attr); in get_port_state()
195 struct mlx5_ib_dev *ibdev; in mlx5_netdev_event() local
197 ibdev = roce->dev; in mlx5_netdev_event()
198 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL); in mlx5_netdev_event()
205 if (ibdev->is_rep) in mlx5_netdev_event()
232 if (ibdev->is_rep) in mlx5_netdev_event()
233 roce = mlx5_get_rep_roce(ibdev, ndev, &port_num); in mlx5_netdev_event()
237 && ibdev->ib_active) { in mlx5_netdev_event()
[all …]
/drivers/infiniband/hw/cxgb4/
provider.c
78 struct ib_device *ibdev = ucontext->device; in c4iw_alloc_ucontext() local
80 struct c4iw_dev *rhp = to_c4iw_dev(ibdev); in c4iw_alloc_ucontext()
85 pr_debug("ibdev %p\n", ibdev); in c4iw_alloc_ucontext()
210 struct ib_device *ibdev = pd->device; in c4iw_allocate_pd() local
214 pr_debug("ibdev %p\n", ibdev); in c4iw_allocate_pd()
215 rhp = (struct c4iw_dev *) ibdev; in c4iw_allocate_pd()
239 static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, in c4iw_query_pkey() argument
242 pr_debug("ibdev %p\n", ibdev); in c4iw_query_pkey()
247 static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, in c4iw_query_gid() argument
253 ibdev, port, index, gid); in c4iw_query_gid()
[all …]
/drivers/infiniband/hw/efa/
efa_verbs.c
115 static inline struct efa_dev *to_edev(struct ib_device *ibdev) in to_edev() argument
117 return container_of(ibdev, struct efa_dev, ibdev); in to_edev()
167 ibdev_err(&dev->ibdev, "Failed to map DMA address\n"); in efa_zalloc_mapped()
192 &dev->ibdev, in mmap_entries_remove_free()
219 ibdev_dbg(&dev->ibdev, in mmap_entry_get()
262 &dev->ibdev, in mmap_entry_insert()
275 int efa_query_device(struct ib_device *ibdev, in efa_query_device() argument
281 struct efa_dev *dev = to_edev(ibdev); in efa_query_device()
286 ibdev_dbg(ibdev, in efa_query_device()
319 ibdev_dbg(ibdev, in efa_query_device()
[all …]
efa.h
52 struct ib_device ibdev; member
119 int efa_query_device(struct ib_device *ibdev,
122 int efa_query_port(struct ib_device *ibdev, u8 port,
127 int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
129 int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
144 int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
157 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
159 struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
160 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
efa_main.c
48 ibdev_err(&dev->ibdev, in unimplemented_aenq_handler()
269 dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED; in efa_ib_device_add()
270 dev->ibdev.phys_port_cnt = 1; in efa_ib_device_add()
271 dev->ibdev.num_comp_vectors = 1; in efa_ib_device_add()
272 dev->ibdev.dev.parent = &pdev->dev; in efa_ib_device_add()
274 dev->ibdev.uverbs_cmd_mask = in efa_ib_device_add()
292 dev->ibdev.uverbs_ex_cmd_mask = in efa_ib_device_add()
295 ib_set_device_ops(&dev->ibdev, &efa_dev_ops); in efa_ib_device_add()
297 err = ib_register_device(&dev->ibdev, "efa_%d"); in efa_ib_device_add()
301 ibdev_info(&dev->ibdev, "IB device registered\n"); in efa_ib_device_add()
[all …]
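The efa.h and efa_verbs.c hits show the pattern every driver in these results relies on: the driver-private device structure embeds a struct ib_device member (here named ibdev), and a helper such as to_edev() recovers the outer structure from the ib_device pointer the core passes into verbs callbacks. A sketch of that pattern, with the member and helper names taken from the EFA excerpts and everything else illustrative:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Driver-private device wrapping the core's ib_device. */
struct efa_dev_sketch {
	struct ib_device ibdev;		/* embedded, registered with the core */
	/* ... driver-private state would follow ... */
};

/* Walk back from the embedded ib_device to the containing structure. */
static inline struct efa_dev_sketch *to_edev_sketch(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev_sketch, ibdev);
}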
/drivers/infiniband/hw/ocrdma/
ocrdma_main.c
85 static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, in ocrdma_port_immutable() argument
92 dev = get_ocrdma_dev(ibdev); in ocrdma_port_immutable()
97 err = ib_query_port(ibdev, port_num, &attr); in ocrdma_port_immutable()
120 rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev); in hw_rev_show()
130 rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev); in hca_type_show()
204 ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); in ocrdma_register_device()
206 memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, in ocrdma_register_device()
208 dev->ibdev.uverbs_cmd_mask = in ocrdma_register_device()
229 dev->ibdev.uverbs_cmd_mask |= in ocrdma_register_device()
235 dev->ibdev.node_type = RDMA_NODE_IB_CA; in ocrdma_register_device()
[all …]
/drivers/infiniband/hw/qib/
qib_mad.c
208 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); in qib_cap_mask_chg() local
209 struct qib_devdata *dd = dd_from_dev(ibdev); in qib_cap_mask_chg()
268 struct ib_device *ibdev) in subn_get_nodedescription() argument
273 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); in subn_get_nodedescription()
278 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_nodeinfo() argument
282 struct qib_devdata *dd = dd_from_ibdev(ibdev); in subn_get_nodeinfo()
296 nip->num_ports = ibdev->phys_port_cnt; in subn_get_nodeinfo()
313 static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_guidinfo() argument
316 struct qib_devdata *dd = dd_from_ibdev(ibdev); in subn_get_guidinfo()
454 static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_portinfo() argument
[all …]
/drivers/infiniband/hw/hfi1/
mad.c
116 event.device = &dd->verbs_dev.rdi.ibdev; in hfi1_event_pkey_change()
303 attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num); in hfi1_create_qp0_ah()
583 u8 *data, struct ib_device *ibdev, in __subn_get_opa_nodedesc() argument
595 memcpy(nd->data, ibdev->node_desc, sizeof(nd->data)); in __subn_get_opa_nodedesc()
604 struct ib_device *ibdev, u8 port, in __subn_get_opa_nodeinfo() argument
608 struct hfi1_devdata *dd = dd_from_ibdev(ibdev); in __subn_get_opa_nodeinfo()
614 if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 || in __subn_get_opa_nodeinfo()
616 get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) { in __subn_get_opa_nodeinfo()
621 ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); in __subn_get_opa_nodeinfo()
625 ni->num_ports = ibdev->phys_port_cnt; in __subn_get_opa_nodeinfo()
[all …]
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_verbs.c
65 int pvrdma_query_device(struct ib_device *ibdev, in pvrdma_query_device() argument
69 struct pvrdma_dev *dev = to_vdev(ibdev); in pvrdma_query_device()
128 int pvrdma_query_port(struct ib_device *ibdev, u8 port, in pvrdma_query_port() argument
131 struct pvrdma_dev *dev = to_vdev(ibdev); in pvrdma_query_port()
186 int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index, in pvrdma_query_gid() argument
189 struct pvrdma_dev *dev = to_vdev(ibdev); in pvrdma_query_gid()
208 int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, in pvrdma_query_pkey() argument
221 err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp, in pvrdma_query_pkey()
224 dev_warn(&to_vdev(ibdev)->pdev->dev, in pvrdma_query_pkey()
234 enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev, in pvrdma_port_link_layer() argument
[all …]
/drivers/infiniband/hw/mthca/
mthca_provider.c
61 static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, in mthca_query_device() argument
67 struct mthca_dev *mdev = to_mdev(ibdev); in mthca_query_device()
139 static int mthca_query_port(struct ib_device *ibdev, in mthca_query_port() argument
157 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, in mthca_query_port()
169 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; in mthca_query_port()
171 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; in mthca_query_port()
188 static int mthca_modify_device(struct ib_device *ibdev, in mthca_modify_device() argument
196 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) in mthca_modify_device()
198 memcpy(ibdev->node_desc, props->node_desc, in mthca_modify_device()
200 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); in mthca_modify_device()
[all …]
mthca_mad.c
107 static void smp_snoop(struct ib_device *ibdev, in smp_snoop() argument
122 mthca_update_rate(to_mdev(ibdev), port_num); in smp_snoop()
123 update_sm_ah(to_mdev(ibdev), port_num, in smp_snoop()
127 event.device = ibdev; in smp_snoop()
142 event.device = ibdev; in smp_snoop()
199 int mthca_process_mad(struct ib_device *ibdev, in mthca_process_mad() argument
222 forward_trap(to_mdev(ibdev), port_num, in_mad); in mthca_process_mad()
259 !ib_query_port(ibdev, port_num, &pattr)) in mthca_process_mad()
262 err = mthca_MAD_IFC(to_mdev(ibdev), in mthca_process_mad()
269 mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err); in mthca_process_mad()
[all …]
/drivers/infiniband/hw/cxgb3/
iwch_provider.c
80 struct ib_device *ibdev = ucontext->device; in iwch_alloc_ucontext() local
82 struct iwch_dev *rhp = to_iwch_dev(ibdev); in iwch_alloc_ucontext()
84 pr_debug("%s ibdev %p\n", __func__, ibdev); in iwch_alloc_ucontext()
109 struct ib_device *ibdev = ibcq->device; in iwch_create_cq() local
118 pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries); in iwch_create_cq()
307 struct ib_device *ibdev = pd->device; in iwch_allocate_pd() local
311 pr_debug("%s ibdev %p\n", __func__, ibdev); in iwch_allocate_pd()
312 rhp = (struct iwch_dev *) ibdev; in iwch_allocate_pd()
911 static int iwch_query_pkey(struct ib_device *ibdev, in iwch_query_pkey() argument
914 pr_debug("%s ibdev %p\n", __func__, ibdev); in iwch_query_pkey()
[all …]
iwch.h
105 struct ib_device ibdev; member
116 static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) in to_iwch_dev() argument
118 return container_of(ibdev, struct iwch_dev, ibdev); in to_iwch_dev()
/drivers/infiniband/hw/qedr/
main.c
61 ibev.device = &dev->ibdev; in qedr_ib_dispatch_event()
74 static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str) in qedr_get_dev_fw_str() argument
76 struct qedr_dev *qedr = get_qedr_dev(ibdev); in qedr_get_dev_fw_str()
84 static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num, in qedr_roce_port_immutable() argument
90 err = qedr_query_port(ibdev, port_num, &attr); in qedr_roce_port_immutable()
103 static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num, in qedr_iw_port_immutable() argument
109 err = qedr_query_port(ibdev, port_num, &attr); in qedr_iw_port_immutable()
126 rdma_device_to_drv_device(device, struct qedr_dev, ibdev); in hw_rev_show()
136 rdma_device_to_drv_device(device, struct qedr_dev, ibdev); in hca_type_show()
140 rdma_protocol_iwarp(&dev->ibdev, 1) ? in hca_type_show()
[all …]
/drivers/infiniband/hw/bnxt_re/
ib_verbs.h
145 int bnxt_re_query_device(struct ib_device *ibdev,
148 int bnxt_re_modify_device(struct ib_device *ibdev,
151 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
153 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
155 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
156 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
160 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
162 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
main.c
568 rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev); in hw_rev_show()
578 rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev); in hca_type_show()
580 return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc); in hca_type_show()
596 ib_unregister_device(&rdev->ibdev); in bnxt_re_unregister_ib()
653 struct ib_device *ibdev = &rdev->ibdev; in bnxt_re_register_ib() local
657 ibdev->node_type = RDMA_NODE_IB_CA; in bnxt_re_register_ib()
658 strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA", in bnxt_re_register_ib()
660 ibdev->phys_port_cnt = 1; in bnxt_re_register_ib()
662 bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid); in bnxt_re_register_ib()
664 ibdev->num_comp_vectors = 1; in bnxt_re_register_ib()
[all …]
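Taken together, the efa_main.c and bnxt_re/main.c hits show the registration sequence a provider runs at probe time: fill in node_type, phys_port_cnt and num_comp_vectors on the embedded ib_device, install the ops table with ib_set_device_ops(), and hand the device to the core with ib_register_device(). A condensed, hedged sketch of that flow; the structure, ops table and name string are illustrative stand-ins, and the two-argument ib_register_device() form mirrors the efa_main.c excerpt:

#include <linux/pci.h>
#include <rdma/ib_verbs.h>

/* Illustrative driver device; only the embedded ibdev member matters here. */
struct example_dev {
	struct ib_device ibdev;
};

/* The real drivers fill this with their verbs callbacks. */
static const struct ib_device_ops example_dev_ops = {
	/* .query_device = ..., .query_port = ..., etc. */
};

static int example_ib_device_add(struct example_dev *dev, struct pci_dev *pdev)
{
	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &pdev->dev;

	ib_set_device_ops(&dev->ibdev, &example_dev_ops);

	/* Two-argument form, as in the efa_main.c hit above. */
	return ib_register_device(&dev->ibdev, "example_%d");
}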
/drivers/infiniband/core/
uverbs_uapi.c
58 struct ib_device *ibdev, in uapi_create_write() argument
83 method_elm->disabled = !(ibdev->uverbs_ex_cmd_mask & in uapi_create_write()
86 method_elm->disabled = !(ibdev->uverbs_cmd_mask & in uapi_create_write()
273 static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev, in uapi_merge_def() argument
289 rc = uapi_merge_def(uapi, ibdev, def->chain, is_driver); in uapi_merge_def()
311 (void *)(&ibdev->ops) + def->needs_fn_offset; in uapi_merge_def()
323 if (def->func_is_supported(ibdev)) in uapi_merge_def()
344 uapi, ibdev, def, cur_obj_key, &cur_method_key); in uapi_merge_def()
640 struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev) in uverbs_alloc_api() argument
650 uapi->driver_id = ibdev->ops.driver_id; in uverbs_alloc_api()
[all …]
device.c
196 static void __ibdev_printk(const char *level, const struct ib_device *ibdev, in __ibdev_printk() argument
199 if (ibdev && ibdev->dev.parent) in __ibdev_printk()
201 ibdev->dev.parent, in __ibdev_printk()
203 dev_driver_string(ibdev->dev.parent), in __ibdev_printk()
204 dev_name(ibdev->dev.parent), in __ibdev_printk()
205 dev_name(&ibdev->dev), in __ibdev_printk()
207 else if (ibdev) in __ibdev_printk()
209 level, dev_name(&ibdev->dev), vaf); in __ibdev_printk()
214 void ibdev_printk(const char *level, const struct ib_device *ibdev, in ibdev_printk() argument
225 __ibdev_printk(level, ibdev, &vaf); in ibdev_printk()
[all …]
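core/device.c implements the ibdev_printk() machinery behind the ibdev_err()/ibdev_dbg()/ibdev_info() calls seen in the efa hits: when the ib_device has a parent device, messages are prefixed with the parent's driver and name plus the IB device name, otherwise with the IB device name alone. A hedged usage sketch inside an invented verbs callback (example_query_device() is not a kernel function; the logging helpers are the ones shown above):

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *udata)
{
	/* Messages are automatically prefixed with the device identity. */
	if (udata && udata->inlen) {
		ibdev_dbg(ibdev, "unexpected udata inlen %zu\n", udata->inlen);
		return -EINVAL;
	}

	memset(props, 0, sizeof(*props));
	ibdev_info(ibdev, "query_device completed\n");
	return 0;
}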
/drivers/infiniband/hw/usnic/
usnic_ib_verbs.h
41 int usnic_ib_query_device(struct ib_device *ibdev,
44 int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
49 int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
51 int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
