/drivers/target/ |
D | target_core_iblock.c |
     57  struct iblock_dev *ib_dev = NULL;  in iblock_alloc_device() local
     59  ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);  in iblock_alloc_device()
     60  if (!ib_dev) {  in iblock_alloc_device()
     65  ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),  in iblock_alloc_device()
     67  if (!ib_dev->ibd_plug)  in iblock_alloc_device()
     72  return &ib_dev->dev;  in iblock_alloc_device()
     75  kfree(ib_dev);  in iblock_alloc_device()
     81  struct iblock_dev *ib_dev = IBLOCK_DEV(dev);  in iblock_configure_unmap() local
     84  ib_dev->ibd_bd);  in iblock_configure_unmap()
     89  struct iblock_dev *ib_dev = IBLOCK_DEV(dev);  in iblock_configure_device() local
     [all …]
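
A hedged reading of the hits above: iblock_alloc_device() makes two allocations (the device struct, then a per-possible-CPU plug array) and its error label unwinds only the outer allocation when the second one fails. A minimal sketch of that shape; "example_dev" and its fields are hypothetical stand-ins for struct iblock_dev, while kzalloc()/kcalloc()/kfree() and nr_cpu_ids are the real kernel symbols.

#include <linux/slab.h>
#include <linux/cpumask.h>	/* nr_cpu_ids */

struct example_plug { int pending; };

struct example_dev {
	struct example_plug *plug;	/* one entry per possible CPU */
};

static struct example_dev *example_alloc_device(void)
{
	struct example_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* second allocation: on failure, unwind the first */
	dev->plug = kcalloc(nr_cpu_ids, sizeof(*dev->plug), GFP_KERNEL);
	if (!dev->plug)
		goto free_dev;

	return dev;

free_dev:
	kfree(dev);
	return NULL;
}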
|
/drivers/infiniband/core/ |
D | roce_gid_mgmt.c |
     82  unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)  in roce_gid_type_mask_support() argument
     87  if (!rdma_protocol_roce(ib_dev, port))  in roce_gid_type_mask_support()
     91  if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))  in roce_gid_type_mask_support()
     98  static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,  in update_gid() argument
    103  unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);  in update_gid()
    110  ib_cache_gid_add(ib_dev, port,  in update_gid()
    114  ib_cache_gid_del(ib_dev, port,  in update_gid()
    147  is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,  in is_eth_port_of_netdev_filter() argument
    171  is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,  in is_eth_port_inactive_slave_filter() argument
    201  is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,  in is_ndev_for_default_gid_filter() argument
    [all …]
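
The roce_gid_mgmt.c hits show roce_gid_type_mask_support() bailing out for non-RoCE ports and then OR-ing one bit per GID type whose PORT_CAP_TO_GID_TYPE predicate passes. A sketch of that table-driven mask build, assuming a hypothetical table and predicate; only the loop shape follows the snippet.

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>

struct ib_device;	/* opaque here */

struct cap_to_bit {
	bool (*is_supported)(struct ib_device *ib_dev, u32 port);
	unsigned int bit;
};

static bool example_always_v1(struct ib_device *ib_dev, u32 port)
{
	return true;
}

static const struct cap_to_bit example_cap_table[] = {
	{ .is_supported = example_always_v1, .bit = 0 },	/* e.g. RoCE v1 */
};

static unsigned long example_gid_type_mask(struct ib_device *ib_dev, u32 port)
{
	unsigned long mask = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_cap_table); i++)
		if (example_cap_table[i].is_supported(ib_dev, port))
			mask |= 1UL << example_cap_table[i].bit;

	return mask;
}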
|
D | cache.c |
    124  static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)  in dispatch_gid_change_event() argument
    128  event.device = ib_dev;  in dispatch_gid_change_event()
    382  static void del_gid(struct ib_device *ib_dev, u32 port,  in del_gid() argument
    390  dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,  in del_gid()
    399  if (!rdma_protocol_roce(ib_dev, port))  in del_gid()
    410  if (rdma_cap_roce_gid_table(ib_dev, port))  in del_gid()
    411  ib_dev->ops.del_gid(&entry->attr, &entry->context);  in del_gid()
    546  static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,  in __ib_cache_gid_add() argument
    562  table = rdma_gid_table(ib_dev, port);  in __ib_cache_gid_add()
    574  attr->device = ib_dev;  in __ib_cache_gid_add()
    [all …]
|
D | device.c |
    186  static void free_netdevs(struct ib_device *ib_dev);
    821  pdata->ib_dev = device;  in alloc_port_data()
   1344  static void prevent_dealloc_device(struct ib_device *ib_dev)  in prevent_dealloc_device() argument
   1462  static void __ib_unregister_device(struct ib_device *ib_dev)  in __ib_unregister_device() argument
   1471  mutex_lock(&ib_dev->unregistration_lock);  in __ib_unregister_device()
   1472  if (!refcount_read(&ib_dev->refcount))  in __ib_unregister_device()
   1475  disable_device(ib_dev);  in __ib_unregister_device()
   1478  free_netdevs(ib_dev);  in __ib_unregister_device()
   1480  ib_free_port_attrs(&ib_dev->coredev);  in __ib_unregister_device()
   1481  device_del(&ib_dev->dev);  in __ib_unregister_device()
    [all …]
|
D | uverbs_std_types_device.c |
    141  struct ib_device *ib_dev, u8 port_num)  in copy_port_attr_to_resp() argument
    153  if (rdma_is_grh_required(ib_dev, port_num))  in copy_port_attr_to_resp()
    156  if (rdma_cap_opa_ah(ib_dev, port_num)) {  in copy_port_attr_to_resp()
    173  resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);  in copy_port_attr_to_resp()
    179  struct ib_device *ib_dev;  in UVERBS_HANDLER() local
    189  ib_dev = ucontext->device;  in UVERBS_HANDLER()
    192  if (!ib_dev->ops.query_port)  in UVERBS_HANDLER()
    200  ret = ib_query_port(ib_dev, port_num, &attr);  in UVERBS_HANDLER()
    204  copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);  in UVERBS_HANDLER()
    245  struct ib_device *ib_dev;  in UVERBS_HANDLER() local
    [all …]
|
D | uverbs_main.c |
     94  if (!srcu_dereference(ufile->device->ib_dev,  in ib_uverbs_get_ucontext_file()
    188  struct ib_device *ib_dev;  in ib_uverbs_release_file() local
    194  ib_dev = srcu_dereference(file->device->ib_dev,  in ib_uverbs_release_file()
    196  if (ib_dev && !ib_dev->ops.disassociate_ucontext)  in ib_uverbs_release_file()
    197  module_put(ib_dev->ops.owner);  in ib_uverbs_release_file()
    482  struct ib_device *ib_dev = async_file->uobj.context->device;  in ib_uverbs_init_async_event_file() local
    495  INIT_IB_EVENT_HANDLER(&async_file->event_handler, ib_dev,  in ib_uverbs_init_async_event_file()
    886  struct ib_device *ib_dev;  in ib_uverbs_open() local
    898  ib_dev = srcu_dereference(dev->ib_dev,  in ib_uverbs_open()
    900  if (!ib_dev) {  in ib_uverbs_open()
    [all …]
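
Every uverbs entry point above reads the hot-unpluggable ib_dev pointer through srcu_dereference() and tolerates a NULL result, which is how a disassociated (hot-unplugged) device is detected. A sketch of that pattern with a hypothetical container struct; srcu_read_lock()/srcu_dereference()/srcu_read_unlock() are the real API.

#include <linux/srcu.h>
#include <linux/errno.h>

struct ib_device;	/* opaque here */

struct example_uverbs_device {
	struct srcu_struct disassociate_srcu;
	struct ib_device __rcu *ib_dev;	/* cleared on hot unplug */
};

static int example_touch_device(struct example_uverbs_device *dev)
{
	struct ib_device *ib_dev;
	int srcu_key, ret = 0;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;	/* device was disassociated */
		goto out;
	}
	/* ib_dev stays valid until srcu_read_unlock() */
out:
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	return ret;
}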
|
D | uverbs_std_types_dm.c |
     56  struct ib_device *ib_dev = attrs->context->device;  in UVERBS_HANDLER() local
     60  if (!ib_dev->ops.alloc_dm)  in UVERBS_HANDLER()
     73  dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs);  in UVERBS_HANDLER()
     77  dm->device = ib_dev;  in UVERBS_HANDLER()
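
This handler, like the mr/counters/cq handlers below, guards an optional driver callback before dispatching and returns -EOPNOTSUPP when the driver never filled it in. A minimal sketch of that guard with a hypothetical ops table:

#include <linux/errno.h>

struct example_ops {
	int (*alloc_dm)(void *ctx);	/* optional driver entry point */
};

struct example_device {
	struct example_ops ops;
};

static int example_alloc_dm(struct example_device *dev, void *ctx)
{
	if (!dev->ops.alloc_dm)
		return -EOPNOTSUPP;	/* verb not implemented by driver */

	return dev->ops.alloc_dm(ctx);
}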
|
D | uverbs_std_types_mr.c |
     53  struct ib_device *ib_dev = pd->device;  in UVERBS_HANDLER() local
     60  if (!ib_dev->ops.advise_mr)  in UVERBS_HANDLER()
     79  return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge,  in UVERBS_HANDLER()
     93  struct ib_device *ib_dev = pd->device;  in UVERBS_HANDLER() local
     98  if (!ib_dev->ops.reg_dm_mr)  in UVERBS_HANDLER()
    119  ret = ib_check_mr_access(ib_dev, attr.access_flags);  in UVERBS_HANDLER()
    193  struct ib_device *ib_dev = pd->device;  in UVERBS_HANDLER() local
    200  if (!ib_dev->ops.reg_user_mr_dmabuf)  in UVERBS_HANDLER()
    236  ret = ib_check_mr_access(ib_dev, access_flags);  in UVERBS_HANDLER()
|
D | uverbs_std_types_counters.c |
     60  struct ib_device *ib_dev = attrs->context->device;  in UVERBS_HANDLER() local
     69  if (!ib_dev->ops.create_counters)  in UVERBS_HANDLER()
     72  counters = rdma_zalloc_drv_obj(ib_dev, ib_counters);  in UVERBS_HANDLER()
     76  counters->device = ib_dev;  in UVERBS_HANDLER()
     81  ret = ib_dev->ops.create_counters(counters, attrs);  in UVERBS_HANDLER()
|
D | uverbs_std_types_cq.c |
     67  struct ib_device *ib_dev = attrs->context->device;  in UVERBS_HANDLER() local
     75  if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)  in UVERBS_HANDLER()
    115  cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);  in UVERBS_HANDLER()
    121  cq->device = ib_dev;  in UVERBS_HANDLER()
    131  ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);  in UVERBS_HANDLER()
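
The CQ handler makes the core/driver split explicit: rdma_zalloc_drv_obj() allocates an object sized by the driver at registration time, the core fills the common fields (cq->device), and ops.create_cq() initializes the driver-private tail. A simplified sketch of that split; the size bookkeeping below is a hypothetical stand-in for the real macro.

#include <linux/slab.h>
#include <linux/err.h>

struct example_cq {
	void *device;	/* common, core-owned fields ... */
};

struct example_device {
	size_t cq_obj_size;	/* >= sizeof(struct example_cq), set by driver */
	int (*create_cq)(struct example_cq *cq);
};

static struct example_cq *example_create_cq(struct example_device *dev)
{
	struct example_cq *cq;
	int ret;

	cq = kzalloc(dev->cq_obj_size, GFP_KERNEL);	/* core + driver part */
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->device = dev;		/* core fields first */

	ret = dev->create_cq(cq);	/* driver initializes the rest */
	if (ret) {
		kfree(cq);
		return ERR_PTR(ret);
	}
	return cq;
}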
|
D | core_priv.h |
     90  struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
     93  void ib_enum_roce_netdev(struct ib_device *ib_dev,
    129  void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
    134  int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
    137  int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
    140  int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
    146  unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
|
D | uverbs_cmd.c |
    210  struct ib_device *ib_dev;  in ib_alloc_ucontext() local
    212  ib_dev = srcu_dereference(ufile->device->ib_dev,  in ib_alloc_ucontext()
    214  if (!ib_dev)  in ib_alloc_ucontext()
    217  ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);  in ib_alloc_ucontext()
    221  ucontext->device = ib_dev;  in ib_alloc_ucontext()
    280  struct ib_device *ib_dev;  in ib_uverbs_get_context() local
    292  uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);  in ib_uverbs_get_context()
    328  struct ib_device *ib_dev = ucontext->device;  in copy_query_dev_fields() local
    331  resp->node_guid = ib_dev->node_guid;  in copy_query_dev_fields()
    367  resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);  in copy_query_dev_fields()
    [all …]
|
/drivers/infiniband/hw/usnic/ |
D | usnic_ib_main.c |
     79  return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));  in usnic_ib_dump_vf_hdr()
    147  usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
    150  ib_event.device = &us_ibdev->ib_dev;  in usnic_ib_handle_usdev_event()
    161  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
    163  ib_event.device = &us_ibdev->ib_dev;  in usnic_ib_handle_usdev_event()
    170  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
    173  ib_event.device = &us_ibdev->ib_dev;  in usnic_ib_handle_usdev_event()
    179  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
    186  dev_name(&us_ibdev->ib_dev.dev));  in usnic_ib_handle_usdev_event()
    189  dev_name(&us_ibdev->ib_dev.dev),  in usnic_ib_handle_usdev_event()
    [all …]
|
D | usnic_ib_sysfs.c |
     52  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in board_id_show()
     70  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in config_show()
     87  dev_name(&us_ibdev->ib_dev.dev),  in config_show()
    109  dev_name(&us_ibdev->ib_dev.dev));  in config_show()
    122  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in iface_show()
    132  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in max_vf_show()
    142  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in qp_per_vf_show()
    156  rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);  in cq_per_vf_show()
    255  kobject_get(&us_ibdev->ib_dev.dev.kobj);  in usnic_ib_sysfs_register_usdev()
    257  &us_ibdev->ib_dev.dev.kobj);  in usnic_ib_sysfs_register_usdev()
    [all …]
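
All of these sysfs show functions recover the driver structure from the core struct ib_device embedded inside it; rdma_device_to_drv_device() is container_of() underneath. A generic sketch of the embed-and-recover idiom with hypothetical types:

#include <linux/kernel.h>	/* container_of */
#include <linux/device.h>

struct example_ib_device {
	struct device dev;	/* plays the role of ib_dev.dev */
};

struct example_drv_dev {
	int vendor_cfg;				/* driver-private state */
	struct example_ib_device ib_dev;	/* embedded core object */
};

static struct example_drv_dev *to_example_drv(struct example_ib_device *ibdev)
{
	return container_of(ibdev, struct example_drv_dev, ib_dev);
}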
|
/drivers/infiniband/hw/hns/ |
D | hns_roce_main.c |
    163  static int hns_roce_query_device(struct ib_device *ib_dev,  in hns_roce_query_device() argument
    167  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_query_device()
    213  static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,  in hns_roce_query_port() argument
    216  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_query_port()
    235  ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,  in hns_roce_query_port()
    238  ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);  in hns_roce_query_port()
    269  static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,  in hns_roce_query_pkey() argument
    280  static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,  in hns_roce_modify_device() argument
    289  spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);  in hns_roce_modify_device()
    290  memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);  in hns_roce_modify_device()
    [all …]
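
hns_roce_query_port() fills struct ib_port_attr and lets ib_get_eth_speed() derive the active speed/width from the bound netdev, warning rather than failing when that lookup errors out. A sketch of a RoCE query_port hook in that shape; parameter and field types track recent kernels and may differ across versions.

#include <rdma/ib_verbs.h>

static int example_query_port(struct ib_device *ib_dev, u32 port_num,
			      struct ib_port_attr *props)
{
	int ret;

	/* ... fill state, MTU, gid_tbl_len, etc. from hardware ... */

	ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
			       &props->active_width);
	if (ret)
		ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);

	return 0;
}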
|
D | hns_roce_pd.c |
     47  struct ib_device *ib_dev = ibpd->device;  in hns_roce_alloc_pd() local
     48  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_alloc_pd()
     57  ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);  in hns_roce_alloc_pd()
     69  ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);  in hns_roce_alloc_pd()
     94  ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);  in hns_roce_uar_alloc()
    129  ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);  in hns_roce_xrcd_alloc()
|
D | hns_roce_mr.c |
     54  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_mr_key()
     92  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_mr_pbl()
    125  struct ib_device *ibdev = &hr_dev->ib_dev;  in hns_roce_mr_free()
    271  struct ib_device *ib_dev = &hr_dev->ib_dev;  in hns_roce_rereg_user_mr() local
    294  ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);  in hns_roce_rereg_user_mr()
    310  ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",  in hns_roce_rereg_user_mr()
    318  ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);  in hns_roce_rereg_user_mr()
    325  ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);  in hns_roce_rereg_user_mr()
    421  struct ib_device *ibdev = &hr_dev->ib_dev;  in hns_roce_map_mr_sg()
    532  struct ib_device *ibdev = &hr_dev->ib_dev;  in hns_roce_alloc_mw()
    [all …]
|
D | hns_roce_qp.c |
    244  ibdev_err(&hr_dev->ib_dev,  in alloc_qpn()
    462  ibdev_err(&hr_dev->ib_dev,  in set_rq_size()
    470  ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",  in set_rq_size()
    610  ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");  in check_sq_size_with_integrity()
    615  ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",  in check_sq_size_with_integrity()
    627  struct ib_device *ibdev = &hr_dev->ib_dev;  in set_user_sq_size()
    705  struct ib_device *ibdev = &hr_dev->ib_dev;  in set_kernel_sq_size()
    798  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_qp_buf()
    892  ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");  in qp_mmap_entry()
    911  struct ib_device *ibdev = &hr_dev->ib_dev;  in alloc_user_qp_db()
    [all …]
|
/drivers/infiniband/ulp/isert/ |
D | ib_isert.c |
    108  struct ib_device *ib_dev = device->ib_device;  in isert_create_qp() local
    112  isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);  in isert_create_qp()
    152  struct ib_device *ib_dev = device->ib_device;  in isert_alloc_rx_descriptors() local
    167  dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,  in isert_alloc_rx_descriptors()
    169  if (ib_dma_mapping_error(ib_dev, dma_addr))  in isert_alloc_rx_descriptors()
    186  ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,  in isert_alloc_rx_descriptors()
    198  struct ib_device *ib_dev = isert_conn->device->ib_device;  in isert_free_rx_descriptors() local
    207  ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,  in isert_free_rx_descriptors()
    218  struct ib_device *ib_dev = device->ib_device;  in isert_create_device_ib_res() local
    222  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);  in isert_create_device_ib_res()
    [all …]
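
isert_alloc_rx_descriptors() maps each receive buffer, checks the handle with ib_dma_mapping_error(), and on failure unmaps only the descriptors already mapped. A sketch of that map-and-unwind loop; the descriptor layout and sizes are hypothetical, the ib_dma_* helpers are the real API.

#include <rdma/ib_verbs.h>
#include <linux/errno.h>

#define EXAMPLE_RX_SIZE	4096
#define EXAMPLE_NR_RX	32

struct example_rx_desc {
	u8 buf[EXAMPLE_RX_SIZE];
	u64 dma_addr;
};

static int example_map_rx(struct ib_device *ib_dev,
			  struct example_rx_desc *descs)
{
	int i, j;

	for (i = 0; i < EXAMPLE_NR_RX; i++) {
		descs[i].dma_addr = ib_dma_map_single(ib_dev, descs[i].buf,
						      EXAMPLE_RX_SIZE,
						      DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, descs[i].dma_addr))
			goto unmap;
	}
	return 0;

unmap:
	/* unwind only what was successfully mapped */
	for (j = 0; j < i; j++)
		ib_dma_unmap_single(ib_dev, descs[j].dma_addr,
				    EXAMPLE_RX_SIZE, DMA_FROM_DEVICE);
	return -ENOMEM;
}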
|
/drivers/infiniband/hw/mlx4/ |
D | mad.c |
    199  ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);  in update_sm_ah()
    454  return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);  in find_slave_port_pkey_ix()
    464  ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);  in find_slave_port_pkey_ix()
    550  ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);  in mlx4_ib_send_to_slave()
    577  attr.type = rdma_ah_find_type(&dev->ib_dev, port);  in mlx4_ib_send_to_slave()
    607  ib_dma_sync_single_for_cpu(&dev->ib_dev,  in mlx4_ib_send_to_slave()
    649  ib_dma_sync_single_for_device(&dev->ib_dev,  in mlx4_ib_send_to_slave()
   1031  ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);  in mlx4_ib_mad_init()
   1034  agent = ib_register_mad_agent(&dev->ib_dev, p + 1,  in mlx4_ib_mad_init()
   1159  mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");  in handle_slaves_guid_change()
    [all …]
|
D | main.c |
    411  if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))  in mlx4_ib_gid_index_to_real_index()
    943  err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);  in mlx4_init_sl2vl_tbl()
   1994  memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);  in init_node_data()
   2003  memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);  in init_node_data()
   2015  rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);  in hca_type_show()
   2025  rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);  in hw_rev_show()
   2035  rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);  in board_id_show()
   2232  ib_set_device_ops(&ibdev->ib_dev,  in mlx4_ib_alloc_diag_counters()
   2248  ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);  in mlx4_ib_alloc_diag_counters()
   2355  if (ib_get_cached_port_state(&ibdev->ib_dev, port,  in mlx4_ib_scan_netdevs()
    [all …]
|
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_main.c |
    103  container_of(device, struct pvrdma_dev, ib_dev);  in pvrdma_get_fw_ver_str()
    205  dev->ib_dev.node_guid = dev->dsr->caps.node_guid;  in pvrdma_register_device()
    208  dev->ib_dev.num_comp_vectors = 1;  in pvrdma_register_device()
    209  dev->ib_dev.dev.parent = &dev->pdev->dev;  in pvrdma_register_device()
    211  dev->ib_dev.node_type = RDMA_NODE_IB_CA;  in pvrdma_register_device()
    212  dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;  in pvrdma_register_device()
    214  ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);  in pvrdma_register_device()
    233  ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);  in pvrdma_register_device()
    241  ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);  in pvrdma_register_device()
    246  ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);  in pvrdma_register_device()
    [all …]
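
pvrdma_register_device() follows the common registration sequence: populate the embedded struct ib_device, install the ops table, bind the netdev to port 1, then register under a printf-style name template. A hedged sketch with a hypothetical driver struct; the three-argument ib_register_device() matches recent kernels.

#include <rdma/ib_verbs.h>

static const struct ib_device_ops example_dev_ops;	/* filled elsewhere */

struct example_dev {
	struct ib_device ib_dev;	/* embedded core device */
	struct net_device *netdev;
	struct device *dma_parent;	/* e.g. &pdev->dev */
};

static int example_register(struct example_dev *dev)
{
	int ret;

	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.phys_port_cnt = 1;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.dev.parent = dev->dma_parent;

	ib_set_device_ops(&dev->ib_dev, &example_dev_ops);

	ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
	if (ret)
		return ret;

	return ib_register_device(&dev->ib_dev, "example%d", dev->dma_parent);
}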
|
/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c |
     69  struct ib_device *ib_dev = device->ib_device;  in iser_create_device_ib_res() local
     71  if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {  in iser_create_device_ib_res()
     76  device->pd = ib_alloc_pd(ib_dev,  in iser_create_device_ib_res()
     81  INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,  in iser_create_device_ib_res()
    110  struct ib_device *ib_dev = device->ib_device;  in iser_create_fastreg_desc() local
    118  if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)  in iser_create_fastreg_desc()
    238  struct ib_device *ib_dev;  in iser_create_ib_conn_res() local
    246  ib_dev = device->ib_device;  in iser_create_ib_conn_res()
    254  (unsigned int)ib_dev->attrs.max_qp_wr);  in iser_create_ib_conn_res()
    257  ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);  in iser_create_ib_conn_res()
    [all …]
|
/drivers/infiniband/hw/mthca/ |
D | mthca_provider.c |
    936  rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);  in hw_rev_show()
    963  rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);  in hca_type_show()
    973  rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);  in board_id_show()
   1009  memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);  in mthca_init_node_data()
   1020  memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);  in mthca_init_node_data()
   1050  container_of(device, struct mthca_dev, ib_dev);  in get_dev_fw_str()
   1142  dev->ib_dev.node_type = RDMA_NODE_IB_CA;  in mthca_register_device()
   1143  dev->ib_dev.phys_port_cnt = dev->limits.num_ports;  in mthca_register_device()
   1144  dev->ib_dev.num_comp_vectors = 1;  in mthca_register_device()
   1145  dev->ib_dev.dev.parent = &dev->pdev->dev;  in mthca_register_device()
    [all …]
|
/drivers/infiniband/sw/rxe/ |
D | rxe_net.c |
    148  ib_device_put(&rxe->ib_dev);  in rxe_udp_encap_recv()
    398  if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {  in rxe_loopback()
    460  attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);  in rxe_init_packet()
    520  rxe = ib_alloc_device(rxe_dev, ib_dev);  in rxe_net_add()
    528  ib_dealloc_device(&rxe->ib_dev);  in rxe_net_add()
    540  ev.device = &rxe->ib_dev;  in rxe_port_event()
    556  dev_info(&rxe->ib_dev.dev, "set active\n");  in rxe_port_up()
    569  dev_info(&rxe->ib_dev.dev, "set down\n");  in rxe_port_down()
    592  ib_unregister_device_queued(&rxe->ib_dev);  in rxe_notify()
    618  ib_device_put(&rxe->ib_dev);  in rxe_notify()
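
The rxe hits cover the whole lifetime: ib_alloc_device() takes the driver type and the name of its embedded ib_device member, a failure before registration must free with ib_dealloc_device(), and transient users pair ib_device_try_get() with ib_device_put(). A sketch of the allocation side; the driver struct and setup step are hypothetical.

#include <rdma/ib_verbs.h>
#include <linux/errno.h>

struct example_dev {
	struct ib_device ib_dev;	/* member name passed to ib_alloc_device() */
	/* driver-private state ... */
};

static int example_setup(struct example_dev *ex)	/* stand-in */
{
	return 0;
}

static int example_add(void)
{
	struct example_dev *ex;
	int err;

	ex = ib_alloc_device(example_dev, ib_dev);
	if (!ex)
		return -ENOMEM;

	err = example_setup(ex);
	if (err) {
		/* not registered yet, so free directly */
		ib_dealloc_device(&ex->ib_dev);
		return err;
	}

	/* ib_register_device() / ib_unregister_device_queued() follow here */
	return 0;
}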
|