Home
last modified time | relevance | path

Searched refs: ib_device (Results 1 – 25 of 97) sorted by relevance

Pages: 1 2 3 4

/drivers/infiniband/hw/ehca/
Dehca_main.c416 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); in init_node_guid()
421 ehca_err(&shca->ib_device, "Can't query device properties"); in init_node_guid()
426 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); in init_node_guid()
441 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); in ehca_init_device()
442 shca->ib_device.owner = THIS_MODULE; in ehca_init_device()
444 shca->ib_device.uverbs_abi_ver = 8; in ehca_init_device()
445 shca->ib_device.uverbs_cmd_mask = in ehca_init_device()
463 shca->ib_device.node_type = RDMA_NODE_IB_CA; in ehca_init_device()
464 shca->ib_device.phys_port_cnt = shca->num_ports; in ehca_init_device()
465 shca->ib_device.num_comp_vectors = 1; in ehca_init_device()
[all …]
Dehca_hca.c53 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) in ehca_query_device()
57 ib_device); in ehca_query_device()
76 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); in ehca_query_device()
81 ehca_err(&shca->ib_device, "Can't query device properties"); in ehca_query_device()
156 ehca_err(&shca->ib_device, "Unknown MTU size: %x.", in map_mtu()
176 ehca_err(&shca->ib_device, "invalid Vl Capability: %x.", in map_number_of_vls()
182 int ehca_query_port(struct ib_device *ibdev, in ehca_query_port()
188 ib_device); in ehca_query_port()
193 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); in ehca_query_port()
199 ehca_err(&shca->ib_device, "Can't query port properties"); in ehca_query_port()
[all …]
Dehca_irq.c104 ehca_err(&shca->ib_device, in print_error_data()
113 ehca_err(&shca->ib_device, in print_error_data()
119 ehca_err(&shca->ib_device, in print_error_data()
121 type, shca->ib_device.name); in print_error_data()
125 ehca_err(&shca->ib_device, "Error data is available: %llx.", resource); in print_error_data()
126 ehca_err(&shca->ib_device, "EHCA ----- error data begin " in print_error_data()
129 ehca_err(&shca->ib_device, "EHCA ----- error data end " in print_error_data()
145 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); in ehca_error_data()
157 ehca_err(&shca->ib_device, in ehca_error_data()
169 ehca_err(&shca->ib_device, in ehca_error_data()
[all …]
Dehca_iverbs.h47 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
49 int ehca_query_port(struct ib_device *ibdev, u8 port,
55 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
57 int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
60 int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
63 struct ib_pd *ehca_alloc_pd(struct ib_device *device,
129 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
183 struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
190 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
Dehca_mrmw.c163 container_of(pd->device, struct ehca_shca, ib_device); in ehca_get_dma_mr()
168 ehca_err(&shca->ib_device, "out of memory"); in ehca_get_dma_mr()
185 ehca_err(&shca->ib_device, "no internal max-MR exist!"); in ehca_get_dma_mr()
192 ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x", in ehca_get_dma_mr()
209 container_of(pd->device, struct ehca_shca, ib_device); in ehca_reg_phys_mr()
320 container_of(pd->device, struct ehca_shca, ib_device); in ehca_reg_user_mr()
454 container_of(mr->device, struct ehca_shca, ib_device); in ehca_rereg_phys_mr()
602 container_of(mr->device, struct ehca_shca, ib_device); in ehca_query_mr()
649 container_of(mr->device, struct ehca_shca, ib_device); in ehca_dereg_mr()
698 container_of(pd->device, struct ehca_shca, ib_device); in ehca_alloc_mw()
[all …]
Dehca_sqp.c87 ehca_err(&shca->ib_device, in ehca_define_sqp()
93 ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x", in ehca_define_sqp()
97 ehca_err(&shca->ib_device, "invalid qp_type=%x", in ehca_define_sqp()
109 ehca_dbg(&shca->ib_device, "... wait until port %x is active", in ehca_define_sqp()
115 ehca_err(&shca->ib_device, "Port %x is not active.", port); in ehca_define_sqp()
142 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, in ehca_process_perf()
153 container_of(ibdev, struct ehca_shca, ib_device); in ehca_process_perf()
219 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, in ehca_process_mad()
Dehca_av.c65 ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x", in ehca_calc_ipd()
70 ret = ehca_query_port(&shca->ib_device, port, &pa); in ehca_calc_ipd()
72 ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret); in ehca_calc_ipd()
93 ib_device); in ehca_create_ah()
173 ib_device); in ehca_modify_ah()
Dehca_eq.c62 struct ib_device *ib_dev = &shca->ib_device; in ehca_create_eq()
183 ehca_err(&shca->ib_device, "Can't free EQ resources."); in ehca_destroy_eq()
/drivers/infiniband/hw/qib/
Dqib_dma.c48 static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) in qib_mapping_error()
53 static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, in qib_dma_map_single()
60 static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, in qib_dma_unmap_single()
66 static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, in qib_dma_map_page()
88 static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, in qib_dma_unmap_page()
94 static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, in qib_map_sg()
115 static void qib_unmap_sg(struct ib_device *dev, in qib_unmap_sg()
122 static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) in qib_sg_dma_address()
131 static unsigned int qib_sg_dma_len(struct ib_device *dev, in qib_sg_dma_len()
137 static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, in qib_sync_single_for_cpu()
[all …]
Dqib_mad.c266 struct ib_device *ibdev) in subn_get_nodedescription()
276 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_nodeinfo()
312 static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_guidinfo()
451 static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_portinfo()
593 static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_pkeytable()
617 static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_guidinfo()
650 static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_portinfo()
1037 static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_pkeytable()
1055 static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, in subn_get_sl_to_vl()
1073 static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, in subn_set_sl_to_vl()
[all …]
/drivers/infiniband/hw/ipath/
Dipath_dma.c50 static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr) in ipath_mapping_error()
55 static u64 ipath_dma_map_single(struct ib_device *dev, in ipath_dma_map_single()
63 static void ipath_dma_unmap_single(struct ib_device *dev, in ipath_dma_unmap_single()
70 static u64 ipath_dma_map_page(struct ib_device *dev, in ipath_dma_map_page()
94 static void ipath_dma_unmap_page(struct ib_device *dev, in ipath_dma_unmap_page()
101 static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl, in ipath_map_sg()
122 static void ipath_unmap_sg(struct ib_device *dev, in ipath_unmap_sg()
129 static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) in ipath_sg_dma_address()
138 static unsigned int ipath_sg_dma_len(struct ib_device *dev, in ipath_sg_dma_len()
144 static void ipath_sync_single_for_cpu(struct ib_device *dev, in ipath_sync_single_for_cpu()
[all …]
Dipath_mad.c59 struct ib_device *ibdev) in recv_subn_get_nodedescription()
85 struct ib_device *ibdev, u8 port) in recv_subn_get_nodeinfo()
123 struct ib_device *ibdev) in recv_subn_get_guidinfo()
236 struct ib_device *ibdev, u8 port) in recv_subn_get_portinfo()
361 struct ib_device *ibdev) in recv_subn_get_pkeytable()
385 struct ib_device *ibdev) in recv_subn_set_guidinfo()
418 struct ib_device *ibdev, u8 port) in recv_subn_set_portinfo()
776 struct ib_device *ibdev) in recv_subn_set_pkeytable()
829 struct ib_device *ibdev, u8 port) in recv_pma_get_portsamplescontrol()
878 struct ib_device *ibdev, u8 port) in recv_pma_set_portsamplescontrol()
[all …]
/drivers/infiniband/core/
Ddevice.c70 static int ib_device_check_mandatory(struct ib_device *device) in ib_device_check_mandatory()
72 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x } in ib_device_check_mandatory()
110 static struct ib_device *__ib_device_get_by_name(const char *name) in __ib_device_get_by_name()
112 struct ib_device *device; in __ib_device_get_by_name()
126 struct ib_device *device; in alloc_name()
154 static int start_port(struct ib_device *device) in start_port()
160 static int end_port(struct ib_device *device) in end_port()
176 struct ib_device *ib_alloc_device(size_t size) in ib_alloc_device()
178 BUG_ON(size < sizeof (struct ib_device)); in ib_alloc_device()
190 void ib_dealloc_device(struct ib_device *device) in ib_dealloc_device()
[all …]
Dsysfs.c45 struct ib_device *ibdev;
453 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_release()
461 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_uevent()
516 static int add_port(struct ib_device *device, int port_num, in add_port()
517 int (*port_callback)(struct ib_device *, in add_port() argument
606 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_node_type()
620 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_sys_image_guid()
638 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_node_guid()
650 struct ib_device *dev = container_of(device, struct ib_device, dev); in show_node_desc()
659 struct ib_device *dev = container_of(device, struct ib_device, dev); in set_node_desc()
[all …]
Dcache.c57 struct ib_device *device;
61 static inline int start_port(struct ib_device *device) in start_port()
66 static inline int end_port(struct ib_device *device) in end_port()
72 int ib_get_cached_gid(struct ib_device *device, in ib_get_cached_gid()
99 int ib_find_cached_gid(struct ib_device *device, in ib_find_cached_gid()
134 int ib_get_cached_pkey(struct ib_device *device, in ib_get_cached_pkey()
161 int ib_find_cached_pkey(struct ib_device *device, in ib_find_cached_pkey()
202 int ib_find_exact_cached_pkey(struct ib_device *device, in ib_find_exact_cached_pkey()
234 int ib_get_cached_lmc(struct ib_device *device, in ib_get_cached_lmc()
252 static void ib_cache_update(struct ib_device *device, in ib_cache_update()
[all …]
Dcore_priv.h41 int ib_device_register_sysfs(struct ib_device *device,
42 int (*port_callback)(struct ib_device *,
44 void ib_device_unregister_sysfs(struct ib_device *device);
Dagent.h43 extern int ib_agent_port_open(struct ib_device *device, int port_num);
45 extern int ib_agent_port_close(struct ib_device *device, int port_num);
48 struct ib_wc *wc, struct ib_device *device,
Dagent.c57 __ib_get_agent_port(struct ib_device *device, int port_num) in __ib_get_agent_port()
70 ib_get_agent_port(struct ib_device *device, int port_num) in ib_get_agent_port()
82 struct ib_wc *wc, struct ib_device *device, in agent_send_response()
145 int ib_agent_port_open(struct ib_device *device, int port_num) in ib_agent_port_open()
196 int ib_agent_port_close(struct ib_device *device, int port_num) in ib_agent_port_close()
/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.h39 int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);
40 int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
41 int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
45 int ocrdma_query_gid(struct ib_device *, u8 port,
47 int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
49 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
55 struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
59 struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
/drivers/infiniband/ulp/iser/
Diser_memory.c51 struct ib_device *dev; in iser_start_rdma_unaligned_sg()
93 dev = iser_task->iser_conn->ib_conn->device->ib_device; in iser_start_rdma_unaligned_sg()
111 struct ib_device *dev; in iser_finalize_rdma_unaligned_sg()
115 dev = iser_task->iser_conn->ib_conn->device->ib_device; in iser_finalize_rdma_unaligned_sg()
174 struct ib_device *ibdev) in iser_sg_to_page_vec()
225 struct ib_device *ibdev) in iser_data_buf_aligned_len()
264 struct ib_device *ibdev) in iser_data_buf_dump()
293 struct ib_device *ibdev) in iser_page_vec_build()
319 struct ib_device *dev; in iser_dma_map_task_data()
322 dev = iser_task->iser_conn->ib_conn->device->ib_device; in iser_dma_map_task_data()
[all …]
Diser_verbs.c77 device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors); in iser_create_device_ib_res()
79 device->cqs_used, device->ib_device->name, in iser_create_device_ib_res()
80 device->ib_device->num_comp_vectors); in iser_create_device_ib_res()
88 device->pd = ib_alloc_pd(device->ib_device); in iser_create_device_ib_res()
96 device->rx_cq[i] = ib_create_cq(device->ib_device, in iser_create_device_ib_res()
104 device->tx_cq[i] = ib_create_cq(device->ib_device, in iser_create_device_ib_res()
126 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, in iser_create_device_ib_res()
205 ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device, in iser_create_ib_conn_res()
209 ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device, in iser_create_ib_conn_res()
213 req_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma); in iser_create_ib_conn_res()
[all …]
Diser_initiator.c159 ib_dma_sync_single_for_cpu(device->ib_device, in iser_create_send_desc()
190 dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc, in iser_alloc_rx_descriptors()
192 if (ib_dma_mapping_error(device->ib_device, dma_addr)) in iser_alloc_rx_descriptors()
209 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, in iser_alloc_rx_descriptors()
229 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, in iser_free_rx_descriptors()
410 ib_dma_sync_single_for_cpu(device->ib_device, in iser_send_control()
417 ib_dma_sync_single_for_device(device->ib_device, in iser_send_control()
466 ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma, in iser_rcv_completion()
477 ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma, in iser_rcv_completion()
506 ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, in iser_snd_completion()
/drivers/infiniband/hw/mlx4/
Dmlx4_ib.h377 struct ib_device *ib_dev;
387 struct ib_device *ib_dev;
466 struct ib_device ib_dev;
506 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) in to_mdev()
600 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
606 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
646 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
658 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
660 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
663 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
[all …]
Dcm.c96 static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) in gid_from_req_msg()
105 id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id) in id_map_find_by_sl_id()
153 static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id) in id_map_find_del()
171 static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new) in sl_id_map_add()
204 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) in id_map_alloc()
245 id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id) in id_map_get()
262 static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) in schedule_delayed()
278 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id, in mlx4_ib_multiplex_cm_handler()
317 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, in mlx4_ib_demux_cm_handler()
/drivers/infiniband/ulp/isert/
Dib_isert.c63 isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) in isert_query_device()
143 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; in isert_alloc_rx_descriptors()
188 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; in isert_free_rx_descriptors()
211 struct ib_device *ib_dev = device->ib_device; in isert_create_device_ib_res()
216 device->ib_device->num_comp_vectors); in isert_create_device_ib_res()
219 device->cqs_used, device->ib_device->name, in isert_create_device_ib_res()
220 device->ib_device->num_comp_vectors); in isert_create_device_ib_res()
240 device->dev_rx_cq[i] = ib_create_cq(device->ib_device, in isert_create_device_ib_res()
248 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, in isert_create_device_ib_res()
336 if (device->ib_device->node_guid == cma_id->device->node_guid) { in isert_device_find_by_ib_dev()
[all …]

Pages: 1 2 3 4