Lines Matching refs:ib_conn
199 int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max) in iser_create_fmr_pool() argument
201 struct iser_device *device = ib_conn->device; in iser_create_fmr_pool()
205 ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + in iser_create_fmr_pool()
208 if (!ib_conn->fmr.page_vec) in iser_create_fmr_pool()
211 ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); in iser_create_fmr_pool()
227 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params); in iser_create_fmr_pool()
228 if (!IS_ERR(ib_conn->fmr.pool)) in iser_create_fmr_pool()
232 kfree(ib_conn->fmr.page_vec); in iser_create_fmr_pool()
233 ib_conn->fmr.page_vec = NULL; in iser_create_fmr_pool()
235 ret = PTR_ERR(ib_conn->fmr.pool); in iser_create_fmr_pool()
236 ib_conn->fmr.pool = NULL; in iser_create_fmr_pool()
249 void iser_free_fmr_pool(struct ib_conn *ib_conn) in iser_free_fmr_pool() argument
252 ib_conn, ib_conn->fmr.pool); in iser_free_fmr_pool()
254 if (ib_conn->fmr.pool != NULL) in iser_free_fmr_pool()
255 ib_destroy_fmr_pool(ib_conn->fmr.pool); in iser_free_fmr_pool()
257 ib_conn->fmr.pool = NULL; in iser_free_fmr_pool()
259 kfree(ib_conn->fmr.page_vec); in iser_free_fmr_pool()
260 ib_conn->fmr.page_vec = NULL; in iser_free_fmr_pool()
353 int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max) in iser_create_fastreg_pool() argument
355 struct iser_device *device = ib_conn->device; in iser_create_fastreg_pool()
359 INIT_LIST_HEAD(&ib_conn->fastreg.pool); in iser_create_fastreg_pool()
360 ib_conn->fastreg.pool_size = 0; in iser_create_fastreg_pool()
370 ib_conn->pi_support, desc); in iser_create_fastreg_pool()
378 list_add_tail(&desc->list, &ib_conn->fastreg.pool); in iser_create_fastreg_pool()
379 ib_conn->fastreg.pool_size++; in iser_create_fastreg_pool()
385 iser_free_fastreg_pool(ib_conn); in iser_create_fastreg_pool()
392 void iser_free_fastreg_pool(struct ib_conn *ib_conn) in iser_free_fastreg_pool() argument
397 if (list_empty(&ib_conn->fastreg.pool)) in iser_free_fastreg_pool()
400 iser_info("freeing conn %p fr pool\n", ib_conn); in iser_free_fastreg_pool()
402 list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { in iser_free_fastreg_pool()
416 if (i < ib_conn->fastreg.pool_size) in iser_free_fastreg_pool()
418 ib_conn->fastreg.pool_size - i); in iser_free_fastreg_pool()
426 static int iser_create_ib_conn_res(struct ib_conn *ib_conn) in iser_create_ib_conn_res() argument
433 BUG_ON(ib_conn->device == NULL); in iser_create_ib_conn_res()
435 device = ib_conn->device; in iser_create_ib_conn_res()
446 ib_conn->comp = &device->comps[min_index]; in iser_create_ib_conn_res()
447 ib_conn->comp->active_qps++; in iser_create_ib_conn_res()
449 iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); in iser_create_ib_conn_res()
452 init_attr.qp_context = (void *)ib_conn; in iser_create_ib_conn_res()
453 init_attr.send_cq = ib_conn->comp->cq; in iser_create_ib_conn_res()
454 init_attr.recv_cq = ib_conn->comp->cq; in iser_create_ib_conn_res()
460 if (ib_conn->pi_support) { in iser_create_ib_conn_res()
467 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); in iser_create_ib_conn_res()
471 ib_conn->qp = ib_conn->cma_id->qp; in iser_create_ib_conn_res()
473 ib_conn, ib_conn->cma_id, in iser_create_ib_conn_res()
474 ib_conn->cma_id->qp); in iser_create_ib_conn_res()
581 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_free_ib_conn_res() local
582 struct iser_device *device = ib_conn->device; in iser_free_ib_conn_res()
585 iser_conn, ib_conn->cma_id, ib_conn->qp); in iser_free_ib_conn_res()
587 if (ib_conn->qp != NULL) { in iser_free_ib_conn_res()
588 ib_conn->comp->active_qps--; in iser_free_ib_conn_res()
589 rdma_destroy_qp(ib_conn->cma_id); in iser_free_ib_conn_res()
590 ib_conn->qp = NULL; in iser_free_ib_conn_res()
599 ib_conn->device = NULL; in iser_free_ib_conn_res()
609 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_conn_release() local
626 if (ib_conn->cma_id != NULL) { in iser_conn_release()
627 rdma_destroy_id(ib_conn->cma_id); in iser_conn_release()
628 ib_conn->cma_id = NULL; in iser_conn_release()
640 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_conn_terminate() local
660 if (ib_conn->cma_id) { in iser_conn_terminate()
661 err = rdma_disconnect(ib_conn->cma_id); in iser_conn_terminate()
667 err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr); in iser_conn_terminate()
669 iser_err("conn %p failed to post beacon", ib_conn); in iser_conn_terminate()
671 wait_for_completion(&ib_conn->flush_comp); in iser_conn_terminate()
695 struct ib_conn *ib_conn; in iser_addr_handler() local
703 ib_conn = &iser_conn->ib_conn; in iser_addr_handler()
711 ib_conn->device = device; in iser_addr_handler()
719 ib_conn->device->ib_device->name); in iser_addr_handler()
720 ib_conn->pi_support = false; in iser_addr_handler()
722 ib_conn->pi_support = true; in iser_addr_handler()
743 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_route_handler() local
744 struct iser_device *device = ib_conn->device; in iser_route_handler()
750 ret = iser_create_ib_conn_res(ib_conn); in iser_route_handler()
861 iser_conn->ib_conn.cma_id = NULL; in iser_cma_handler()
879 iser_conn->ib_conn.post_recv_buf_count = 0; in iser_conn_init()
880 init_completion(&iser_conn->ib_conn.flush_comp); in iser_conn_init()
885 spin_lock_init(&iser_conn->ib_conn.lock); in iser_conn_init()
898 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_connect() local
908 ib_conn->device = NULL; in iser_connect()
912 ib_conn->beacon.wr_id = ISER_BEACON_WRID; in iser_connect()
913 ib_conn->beacon.opcode = IB_WR_SEND; in iser_connect()
915 ib_conn->cma_id = rdma_create_id(iser_cma_handler, in iser_connect()
918 if (IS_ERR(ib_conn->cma_id)) { in iser_connect()
919 err = PTR_ERR(ib_conn->cma_id); in iser_connect()
924 err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000); in iser_connect()
946 ib_conn->cma_id = NULL; in iser_connect()
960 int iser_reg_page_vec(struct ib_conn *ib_conn, in iser_reg_page_vec() argument
972 mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, in iser_reg_page_vec()
1031 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_unreg_mem_fastreg() local
1039 spin_lock_bh(&ib_conn->lock); in iser_unreg_mem_fastreg()
1040 list_add_tail(&desc->list, &ib_conn->fastreg.pool); in iser_unreg_mem_fastreg()
1041 spin_unlock_bh(&ib_conn->lock); in iser_unreg_mem_fastreg()
1047 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_post_recvl() local
1053 sge.lkey = ib_conn->device->mr->lkey; in iser_post_recvl()
1060 ib_conn->post_recv_buf_count++; in iser_post_recvl()
1061 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); in iser_post_recvl()
1064 ib_conn->post_recv_buf_count--; in iser_post_recvl()
1073 struct ib_conn *ib_conn = &iser_conn->ib_conn; in iser_post_recvm() local
1077 for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { in iser_post_recvm()
1089 ib_conn->post_recv_buf_count += count; in iser_post_recvm()
1090 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); in iser_post_recvm()
1093 ib_conn->post_recv_buf_count -= count; in iser_post_recvm()
1105 int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, in iser_post_send() argument
1111 ib_dma_sync_single_for_device(ib_conn->device->ib_device, in iser_post_send()
1122 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); in iser_post_send()
1163 iser_handle_comp_error(struct ib_conn *ib_conn, in iser_handle_comp_error() argument
1167 struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn, in iser_handle_comp_error()
1168 ib_conn); in iser_handle_comp_error()
1181 ib_conn->post_recv_buf_count--; in iser_handle_comp_error()
1195 struct ib_conn *ib_conn; in iser_handle_wc() local
1199 ib_conn = wc->qp->qp_context; in iser_handle_wc()
1204 ib_conn); in iser_handle_wc()
1208 iser_snd_completion(tx_desc, ib_conn); in iser_handle_wc()
1221 iser_handle_comp_error(ib_conn, wc); in iser_handle_wc()
1225 complete(&ib_conn->flush_comp); in iser_handle_wc()