Home
last modified time | relevance | path

Searched refs: ib_device (Results 1 – 25 of 107) sorted by relevance

12345

/drivers/infiniband/hw/ehca/
Dehca_main.c417 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); in init_node_guid()
422 ehca_err(&shca->ib_device, "Can't query device properties"); in init_node_guid()
427 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); in init_node_guid()
442 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); in ehca_init_device()
443 shca->ib_device.owner = THIS_MODULE; in ehca_init_device()
445 shca->ib_device.uverbs_abi_ver = 8; in ehca_init_device()
446 shca->ib_device.uverbs_cmd_mask = in ehca_init_device()
464 shca->ib_device.node_type = RDMA_NODE_IB_CA; in ehca_init_device()
465 shca->ib_device.phys_port_cnt = shca->num_ports; in ehca_init_device()
466 shca->ib_device.num_comp_vectors = 1; in ehca_init_device()
[all …]
Dehca_hca.c53 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) in ehca_query_device()
57 ib_device); in ehca_query_device()
76 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); in ehca_query_device()
81 ehca_err(&shca->ib_device, "Can't query device properties"); in ehca_query_device()
156 ehca_err(&shca->ib_device, "Unknown MTU size: %x.", in map_mtu()
176 ehca_err(&shca->ib_device, "invalid Vl Capability: %x.", in map_number_of_vls()
182 int ehca_query_port(struct ib_device *ibdev, in ehca_query_port()
188 ib_device); in ehca_query_port()
193 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); in ehca_query_port()
199 ehca_err(&shca->ib_device, "Can't query port properties"); in ehca_query_port()
[all …]
Dehca_irq.c104 ehca_err(&shca->ib_device, in print_error_data()
113 ehca_err(&shca->ib_device, in print_error_data()
119 ehca_err(&shca->ib_device, in print_error_data()
121 type, shca->ib_device.name); in print_error_data()
125 ehca_err(&shca->ib_device, "Error data is available: %llx.", resource); in print_error_data()
126 ehca_err(&shca->ib_device, "EHCA ----- error data begin " in print_error_data()
129 ehca_err(&shca->ib_device, "EHCA ----- error data end " in print_error_data()
145 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); in ehca_error_data()
157 ehca_err(&shca->ib_device, in ehca_error_data()
169 ehca_err(&shca->ib_device, in ehca_error_data()
[all …]
Dehca_mrmw.c163 container_of(pd->device, struct ehca_shca, ib_device); in ehca_get_dma_mr()
168 ehca_err(&shca->ib_device, "out of memory"); in ehca_get_dma_mr()
185 ehca_err(&shca->ib_device, "no internal max-MR exist!"); in ehca_get_dma_mr()
192 ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x", in ehca_get_dma_mr()
209 container_of(pd->device, struct ehca_shca, ib_device); in ehca_reg_phys_mr()
320 container_of(pd->device, struct ehca_shca, ib_device); in ehca_reg_user_mr()
451 container_of(mr->device, struct ehca_shca, ib_device); in ehca_rereg_phys_mr()
599 container_of(mr->device, struct ehca_shca, ib_device); in ehca_query_mr()
646 container_of(mr->device, struct ehca_shca, ib_device); in ehca_dereg_mr()
695 container_of(pd->device, struct ehca_shca, ib_device); in ehca_alloc_mw()
[all …]
Dehca_iverbs.h47 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
49 int ehca_query_port(struct ib_device *ibdev, u8 port,
55 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
57 int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
60 int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
63 struct ib_pd *ehca_alloc_pd(struct ib_device *device,
129 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
183 struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
190 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
Dehca_sqp.c87 ehca_err(&shca->ib_device, in ehca_define_sqp()
93 ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x", in ehca_define_sqp()
97 ehca_err(&shca->ib_device, "invalid qp_type=%x", in ehca_define_sqp()
109 ehca_dbg(&shca->ib_device, "... wait until port %x is active", in ehca_define_sqp()
115 ehca_err(&shca->ib_device, "Port %x is not active.", port); in ehca_define_sqp()
142 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, in ehca_process_perf()
153 container_of(ibdev, struct ehca_shca, ib_device); in ehca_process_perf()
219 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, in ehca_process_mad()
Dehca_av.c65 ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x", in ehca_calc_ipd()
70 ret = ehca_query_port(&shca->ib_device, port, &pa); in ehca_calc_ipd()
72 ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret); in ehca_calc_ipd()
93 ib_device); in ehca_create_ah()
173 ib_device); in ehca_modify_ah()
/drivers/infiniband/hw/qib/
Dqib_dma.c48 static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) in qib_mapping_error()
53 static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, in qib_dma_map_single()
60 static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, in qib_dma_unmap_single()
66 static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, in qib_dma_map_page()
88 static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, in qib_dma_unmap_page()
94 static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, in qib_map_sg()
119 static void qib_unmap_sg(struct ib_device *dev, in qib_unmap_sg()
126 static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, in qib_sync_single_for_cpu()
131 static void qib_sync_single_for_device(struct ib_device *dev, u64 addr, in qib_sync_single_for_device()
137 static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size, in qib_dma_alloc_coherent()
[all …]
Dqib_mad.c266 struct ib_device *ibdev) in subn_get_nodedescription()
276 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_nodeinfo()
312 static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_guidinfo()
451 static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_portinfo()
593 static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_pkeytable()
617 static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_guidinfo()
650 static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_portinfo()
1037 static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_pkeytable()
1055 static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_sl_to_vl()
1073 static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_sl_to_vl()
[all …]
/drivers/infiniband/hw/ipath/
Dipath_dma.c50 static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr) in ipath_mapping_error()
55 static u64 ipath_dma_map_single(struct ib_device *dev, in ipath_dma_map_single()
63 static void ipath_dma_unmap_single(struct ib_device *dev, in ipath_dma_unmap_single()
70 static u64 ipath_dma_map_page(struct ib_device *dev, in ipath_dma_map_page()
94 static void ipath_dma_unmap_page(struct ib_device *dev, in ipath_dma_unmap_page()
101 static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl, in ipath_map_sg()
126 static void ipath_unmap_sg(struct ib_device *dev, in ipath_unmap_sg()
133 static void ipath_sync_single_for_cpu(struct ib_device *dev, in ipath_sync_single_for_cpu()
140 static void ipath_sync_single_for_device(struct ib_device *dev, in ipath_sync_single_for_device()
147 static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size, in ipath_dma_alloc_coherent()
[all …]
Dipath_mad.c59 struct ib_device *ibdev) in recv_subn_get_nodedescription()
85 struct ib_device *ibdev, u8 port) in recv_subn_get_nodeinfo()
123 struct ib_device *ibdev) in recv_subn_get_guidinfo()
236 struct ib_device *ibdev, u8 port) in recv_subn_get_portinfo()
361 struct ib_device *ibdev) in recv_subn_get_pkeytable()
385 struct ib_device *ibdev) in recv_subn_set_guidinfo()
418 struct ib_device *ibdev, u8 port) in recv_subn_set_portinfo()
782 struct ib_device *ibdev, u8 port) in recv_subn_set_pkeytable()
835 struct ib_device *ibdev, u8 port) in recv_pma_get_portsamplescontrol()
884 struct ib_device *ibdev, u8 port) in recv_pma_set_portsamplescontrol()
[all …]
/drivers/infiniband/core/
Ddevice.c70 static int ib_device_check_mandatory(struct ib_device *device) in ib_device_check_mandatory()
72 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x } in ib_device_check_mandatory()
110 static struct ib_device *__ib_device_get_by_name(const char *name) in __ib_device_get_by_name()
112 struct ib_device *device; in __ib_device_get_by_name()
126 struct ib_device *device; in alloc_name()
154 static int start_port(struct ib_device *device) in start_port()
160 static int end_port(struct ib_device *device) in end_port()
176 struct ib_device *ib_alloc_device(size_t size) in ib_alloc_device()
178 BUG_ON(size < sizeof (struct ib_device)); in ib_alloc_device()
190 void ib_dealloc_device(struct ib_device *device) in ib_dealloc_device()
[all …]
Dsysfs.c45 struct ib_device *ibdev;
457 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_release()
465 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_uevent()
520 static int add_port(struct ib_device *device, int port_num, in add_port()
521 int (*port_callback)(struct ib_device *, in add_port() argument
617 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_node_type()
633 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_sys_image_guid()
651 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_node_guid()
663 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_node_desc()
672 struct ib_device *dev = container_of(device, struct ib_device, dev); in set_node_desc()
[all …]
Dcache.c57 struct ib_device *device;
61 static inline int start_port(struct ib_device *device) in start_port()
66 static inline int end_port(struct ib_device *device) in end_port()
72 int ib_get_cached_gid(struct ib_device *device, in ib_get_cached_gid()
99 int ib_find_cached_gid(struct ib_device *device, in ib_find_cached_gid()
134 int ib_get_cached_pkey(struct ib_device *device, in ib_get_cached_pkey()
161 int ib_find_cached_pkey(struct ib_device *device, in ib_find_cached_pkey()
202 int ib_find_exact_cached_pkey(struct ib_device *device, in ib_find_exact_cached_pkey()
234 int ib_get_cached_lmc(struct ib_device *device, in ib_get_cached_lmc()
252 static void ib_cache_update(struct ib_device *device, in ib_cache_update()
[all …]
Dagent.h43 extern int ib_agent_port_open(struct ib_device *device, int port_num);
45 extern int ib_agent_port_close(struct ib_device *device, int port_num);
48 struct ib_wc *wc, struct ib_device *device,
Dcore_priv.h41 int ib_device_register_sysfs(struct ib_device *device,
42 int (*port_callback)(struct ib_device *,
44 void ib_device_unregister_sysfs(struct ib_device *device);
Dagent.c57 __ib_get_agent_port(struct ib_device *device, int port_num) in __ib_get_agent_port()
70 ib_get_agent_port(struct ib_device *device, int port_num) in ib_get_agent_port()
82 struct ib_wc *wc, struct ib_device *device, in agent_send_response()
145 int ib_agent_port_open(struct ib_device *device, int port_num) in ib_agent_port_open()
196 int ib_agent_port_close(struct ib_device *device, int port_num) in ib_agent_port_close()
/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.h39 int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);
40 int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
41 int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
45 int ocrdma_query_gid(struct ib_device *, u8 port,
47 int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
49 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
55 struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
59 struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
94 struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
/drivers/infiniband/hw/usnic/
Dusnic_ib_verbs.h24 enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
26 int usnic_ib_query_device(struct ib_device *ibdev,
28 int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
33 int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
35 int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
37 struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
47 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
55 struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
/drivers/infiniband/ulp/iser/
Diser_memory.c52 struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device; in iser_start_rdma_unaligned_sg()
115 struct ib_device *dev; in iser_finalize_rdma_unaligned_sg()
118 dev = iser_task->iser_conn->ib_conn.device->ib_device; in iser_finalize_rdma_unaligned_sg()
175 struct ib_device *ibdev, u64 *pages, in iser_sg_to_page_vec()
228 struct ib_device *ibdev) in iser_data_buf_aligned_len()
267 struct ib_device *ibdev) in iser_data_buf_dump()
293 struct ib_device *ibdev) in iser_page_vec_build()
321 struct ib_device *dev; in iser_dma_map_task_data()
324 dev = iser_task->iser_conn->ib_conn.device->ib_device; in iser_dma_map_task_data()
338 struct ib_device *dev; in iser_dma_unmap_task_data()
[all …]
Diser_initiator.c168 ib_dma_sync_single_for_cpu(device->ib_device, in iser_create_send_desc()
190 ib_dma_unmap_single(device->ib_device, in iser_free_login_buf()
195 ib_dma_unmap_single(device->ib_device, in iser_free_login_buf()
223 iser_conn->login_req_dma = ib_dma_map_single(device->ib_device, in iser_alloc_login_buf()
228 iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device, in iser_alloc_login_buf()
233 req_err = ib_dma_mapping_error(device->ib_device, in iser_alloc_login_buf()
235 resp_err = ib_dma_mapping_error(device->ib_device, in iser_alloc_login_buf()
284 dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc, in iser_alloc_rx_descriptors()
286 if (ib_dma_mapping_error(device->ib_device, dma_addr)) in iser_alloc_rx_descriptors()
303 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, in iser_alloc_rx_descriptors()
[all …]
Diser_verbs.c81 ret = ib_query_device(device->ib_device, dev_attr); in iser_create_device_ib_res()
83 pr_warn("Query device failed for %s\n", device->ib_device->name); in iser_create_device_ib_res()
88 if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr && in iser_create_device_ib_res()
89 device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) { in iser_create_device_ib_res()
108 device->ib_device->num_comp_vectors); in iser_create_device_ib_res()
110 device->comps_used, device->ib_device->name, in iser_create_device_ib_res()
111 device->ib_device->num_comp_vectors); in iser_create_device_ib_res()
113 device->pd = ib_alloc_pd(device->ib_device); in iser_create_device_ib_res()
121 comp->cq = ib_create_cq(device->ib_device, in iser_create_device_ib_res()
144 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, in iser_create_device_ib_res()
[all …]
/drivers/infiniband/hw/mlx4/
Dmlx4_ib.h405 struct ib_device *ib_dev;
415 struct ib_device *ib_dev;
498 struct ib_device ib_dev;
545 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) in to_mdev()
645 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
651 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
691 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
703 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
705 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
708 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
[all …]
Dcm.c135 static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) in gid_from_req_msg()
144 id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id) in id_map_find_by_sl_id()
192 static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) in id_map_find_del()
210 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) in sl_id_map_add()
243 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) in id_map_alloc()
284 id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id) in id_map_get()
301 static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) in schedule_delayed()
317 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id, in mlx4_ib_multiplex_cm_handler()
358 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, in mlx4_ib_demux_cm_handler()
/drivers/infiniband/ulp/isert/
Dib_isert.c93 isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) in isert_query_device()
178 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; in isert_alloc_rx_descriptors()
223 struct ib_device *ib_dev = isert_conn->conn_device->ib_device; in isert_free_rx_descriptors()
248 struct ib_device *ib_dev = device->ib_device; in isert_create_device_ib_res()
279 device->ib_device->num_comp_vectors); in isert_create_device_ib_res()
283 device->cqs_used, device->ib_device->name, in isert_create_device_ib_res()
284 device->ib_device->num_comp_vectors, device->use_fastreg, in isert_create_device_ib_res()
299 device->dev_rx_cq[i] = ib_create_cq(device->ib_device, in isert_create_device_ib_res()
311 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, in isert_create_device_ib_res()
392 if (device->ib_device->node_guid == cma_id->device->node_guid) { in isert_device_find_by_ib_dev()
[all …]

12345