Lines matching references to assoc in the nvmet FC target code
35 struct nvmet_fc_tgt_assoc *assoc; member
142 struct nvmet_fc_tgt_assoc *assoc; member
201 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) in nvmet_fc_makeconnid() argument
203 return (assoc->association_id | qid); in nvmet_fc_makeconnid()
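
nvmet_fc_makeconnid() above simply ORs the queue id into the association id, so the scheme only stays collision-free if the association-id generator keeps the low queue-id bits clear. A minimal user-space sketch of that composition; QID_BITS, the 8-bit width, and the helper names are assumptions made for illustration, not the driver's actual constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Assumption for illustration: the low 8 bits of an association id are
 * reserved for the queue id, so OR-ing a qid in (as nvmet_fc_makeconnid()
 * does) cannot disturb the association part.
 */
#define QID_BITS  8
#define QID_MASK  ((1ULL << QID_BITS) - 1)

static uint64_t make_association_id(uint64_t random_bits)
{
    return random_bits << QID_BITS;     /* queue-id bits stay zero */
}

static uint64_t make_connid(uint64_t association_id, uint16_t qid)
{
    assert((association_id & QID_MASK) == 0);
    return association_id | qid;        /* mirrors nvmet_fc_makeconnid() */
}

int main(void)
{
    uint64_t assoc_id = make_association_id(0x1234abcdULL);

    for (unsigned qid = 0; qid < 3; qid++)
        printf("qid %u -> connid 0x%016llx\n", qid,
               (unsigned long long)make_connid(assoc_id, (uint16_t)qid));
    return 0;
}
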
244 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
245 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
252 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
475 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) in nvmet_fc_xmt_disconnect_assoc() argument
477 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc()
489 if (!tgtport->ops->ls_req || !assoc->hostport || in nvmet_fc_xmt_disconnect_assoc()
490 assoc->hostport->invalid) in nvmet_fc_xmt_disconnect_assoc()
499 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
512 lsop->hosthandle = assoc->hostport->hosthandle; in nvmet_fc_xmt_disconnect_assoc()
515 assoc->association_id); in nvmet_fc_xmt_disconnect_assoc()
522 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
789 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, in nvmet_fc_alloc_target_queue() argument
803 if (!nvmet_fc_tgt_a_get(assoc)) in nvmet_fc_alloc_target_queue()
807 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
808 assoc->a_id, qid); in nvmet_fc_alloc_target_queue()
814 queue->assoc = assoc; in nvmet_fc_alloc_target_queue()
825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
831 WARN_ON(assoc->queues[qid]); in nvmet_fc_alloc_target_queue()
832 spin_lock_irqsave(&assoc->tgtport->lock, flags); in nvmet_fc_alloc_target_queue()
833 assoc->queues[qid] = queue; in nvmet_fc_alloc_target_queue()
834 spin_unlock_irqrestore(&assoc->tgtport->lock, flags); in nvmet_fc_alloc_target_queue()
839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
842 nvmet_fc_tgt_a_put(assoc); in nvmet_fc_alloc_target_queue()
856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); in nvmet_fc_tgt_queue_free()
857 queue->assoc->queues[queue->qid] = NULL; in nvmet_fc_tgt_queue_free()
858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); in nvmet_fc_tgt_queue_free()
860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
862 nvmet_fc_tgt_a_put(queue->assoc); in nvmet_fc_tgt_queue_free()
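
The nvmet_fc_alloc_target_queue()/nvmet_fc_tgt_queue_free() lines above show the reference discipline around an association: a queue is only created if it can pin its association (a failing nvmet_fc_tgt_a_get() means the association is already on its way out), the queue pointer is published and unpublished in assoc->queues[] under tgtport->lock, and the pin is dropped only after the queue itself is gone. A self-contained user-space sketch of that pattern; the struct layout, helper names, and the pthread mutex standing in for the driver's spinlock are all assumptions for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-ins for the driver structures.  The point is the
 * reference discipline visible in the listing: a queue pins its association
 * for its whole lifetime, and the queue pointer is published/unpublished
 * under the target-port lock.
 */
struct assoc {
    atomic_int      ref;
    pthread_mutex_t lock;               /* plays the tgtport->lock role */
    struct queue   *queues[4];
};

struct queue {
    struct assoc *assoc;
    int           qid;
};

/* "get unless zero": never revive an association whose last ref is gone */
static int assoc_get(struct assoc *a)
{
    int old = atomic_load(&a->ref);

    while (old > 0)
        if (atomic_compare_exchange_weak(&a->ref, &old, old + 1))
            return 1;
    return 0;
}

static void assoc_put(struct assoc *a)
{
    if (atomic_fetch_sub(&a->ref, 1) == 1)
        free(a);                        /* the release callback's job */
}

static struct queue *queue_alloc(struct assoc *a, int qid)
{
    struct queue *q;

    if (!assoc_get(a))                  /* association already dying */
        return NULL;

    q = calloc(1, sizeof(*q));
    if (!q) {
        assoc_put(a);
        return NULL;
    }
    q->assoc = a;
    q->qid = qid;

    pthread_mutex_lock(&a->lock);       /* publish under the lock */
    a->queues[qid] = q;
    pthread_mutex_unlock(&a->lock);
    return q;
}

static void queue_free(struct queue *q)
{
    struct assoc *a = q->assoc;

    pthread_mutex_lock(&a->lock);       /* unpublish first ... */
    a->queues[q->qid] = NULL;
    pthread_mutex_unlock(&a->lock);

    free(q);
    assoc_put(a);                       /* ... then drop the pin */
}

int main(void)
{
    struct assoc *a = calloc(1, sizeof(*a));
    struct queue *q;

    atomic_init(&a->ref, 1);            /* base reference */
    pthread_mutex_init(&a->lock, NULL);

    q = queue_alloc(a, 0);              /* takes the queue's pin */
    queue_free(q);                      /* drops it again */
    assoc_put(a);                       /* drop the base ref: freed here */
    return 0;
}

In the driver those roles are played by nvmet_fc_tgt_a_get()/nvmet_fc_tgt_a_put(), which wrap kref_get_unless_zero()/kref_put() with nvmet_fc_target_assoc_free() as the release function; the same "get unless zero" semantics are what make the list walks in nvmet_fc_find_target_queue() and nvmet_fc_find_target_assoc() safe, since a lookup can never resurrect an association whose last reference has already been dropped.
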
885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue()
964 struct nvmet_fc_tgt_assoc *assoc; in nvmet_fc_find_target_queue() local
974 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
975 if (association_id == assoc->association_id) { in nvmet_fc_find_target_queue()
976 queue = assoc->queues[qid]; in nvmet_fc_find_target_queue()
1088 struct nvmet_fc_tgt_assoc *assoc = in nvmet_fc_delete_assoc() local
1091 nvmet_fc_delete_target_assoc(assoc); in nvmet_fc_delete_assoc()
1092 nvmet_fc_tgt_a_put(assoc); in nvmet_fc_delete_assoc()
1098 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; in nvmet_fc_alloc_target_assoc() local
1104 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1105 if (!assoc) in nvmet_fc_alloc_target_assoc()
1115 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1116 if (IS_ERR(assoc->hostport)) in nvmet_fc_alloc_target_assoc()
1119 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1120 assoc->a_id = idx; in nvmet_fc_alloc_target_assoc()
1121 INIT_LIST_HEAD(&assoc->a_list); in nvmet_fc_alloc_target_assoc()
1122 kref_init(&assoc->ref); in nvmet_fc_alloc_target_assoc()
1123 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); in nvmet_fc_alloc_target_assoc()
1124 atomic_set(&assoc->terminating, 0); in nvmet_fc_alloc_target_assoc()
1139 assoc->association_id = ran; in nvmet_fc_alloc_target_assoc()
1140 list_add_tail(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1145 return assoc; in nvmet_fc_alloc_target_assoc()
1152 kfree(assoc); in nvmet_fc_alloc_target_assoc()
1159 struct nvmet_fc_tgt_assoc *assoc = in nvmet_fc_target_assoc_free() local
1161 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free()
1166 nvmet_fc_xmt_disconnect_assoc(assoc); in nvmet_fc_target_assoc_free()
1168 nvmet_fc_free_hostport(assoc->hostport); in nvmet_fc_target_assoc_free()
1170 list_del(&assoc->a_list); in nvmet_fc_target_assoc_free()
1171 oldls = assoc->rcv_disconn; in nvmet_fc_target_assoc_free()
1176 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1179 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1180 kfree(assoc); in nvmet_fc_target_assoc_free()
1185 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) in nvmet_fc_tgt_a_put() argument
1187 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); in nvmet_fc_tgt_a_put()
1191 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) in nvmet_fc_tgt_a_get() argument
1193 return kref_get_unless_zero(&assoc->ref); in nvmet_fc_tgt_a_get()
1197 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) in nvmet_fc_delete_target_assoc() argument
1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc()
1204 terminating = atomic_xchg(&assoc->terminating, 1); in nvmet_fc_delete_target_assoc()
1212 queue = assoc->queues[i]; in nvmet_fc_delete_target_assoc()
1226 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1228 nvmet_fc_tgt_a_put(assoc); in nvmet_fc_delete_target_assoc()
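
nvmet_fc_delete_target_assoc() uses atomic_xchg() on assoc->terminating as a run-once guard, so the teardown of the association's queues happens exactly once no matter how many paths (the del_work handler, the Disconnect LS handler) race to trigger it. A tiny user-space sketch of that guard, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

/*
 * Run-once guard mirroring the atomic_xchg() on assoc->terminating:
 * whichever caller swaps 0 -> 1 first performs the teardown; every later
 * caller sees 1 and backs off.
 */
static atomic_int terminating;

static void delete_target_assoc(void)
{
    if (atomic_exchange(&terminating, 1)) {
        printf("teardown already in flight, backing off\n");
        return;
    }
    printf("tearing the queues down exactly once\n");
}

int main(void)
{
    delete_target_assoc();
    delete_target_assoc();  /* second call is a no-op */
    return 0;
}
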
1235 struct nvmet_fc_tgt_assoc *assoc; in nvmet_fc_find_target_assoc() local
1240 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1241 if (association_id == assoc->association_id) { in nvmet_fc_find_target_assoc()
1242 ret = assoc; in nvmet_fc_find_target_assoc()
1243 if (!nvmet_fc_tgt_a_get(assoc)) in nvmet_fc_find_target_assoc()
1476 struct nvmet_fc_tgt_assoc *assoc, *next; in __nvmet_fc_free_assocs() local
1480 list_for_each_entry_safe(assoc, next, in __nvmet_fc_free_assocs()
1482 if (!nvmet_fc_tgt_a_get(assoc)) in __nvmet_fc_free_assocs()
1484 if (!schedule_work(&assoc->del_work)) in __nvmet_fc_free_assocs()
1486 nvmet_fc_tgt_a_put(assoc); in __nvmet_fc_free_assocs()
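
__nvmet_fc_free_assocs() takes a reference on each association before queueing its del_work, and the handler (nvmet_fc_delete_assoc(), earlier in the listing) drops exactly one reference when it runs; if schedule_work() reports the work was already pending, the caller has to give back the reference it just took or the association would never be freed. nvmet_fc_invalidate_host() and nvmet_fc_delete_ctrl() below follow the same rule. A toy user-space model of that accounting; every name in it is illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs = 1;        /* the association's base reference */
static atomic_bool work_pending;   /* stands in for the queued del_work */

static void get_ref(void)
{
    /* the driver uses the "get unless zero" form (see the earlier
     * sketch); a plain increment is enough for this toy */
    atomic_fetch_add(&refs, 1);
}

static void put_ref(void)
{
    if (atomic_fetch_sub(&refs, 1) == 1)
        printf("last reference dropped, association freed\n");
}

static bool fake_schedule_work(void)
{
    /* false means the work item was already queued */
    return !atomic_exchange(&work_pending, true);
}

static void del_work_handler(void)
{
    printf("work handler: deleting the target association\n");
    put_ref();                     /* the reference taken by the scheduler */
}

static void try_queue_delete(void)
{
    get_ref();
    if (!fake_schedule_work())
        put_ref();                 /* already queued: undo our reference */
}

int main(void)
{
    try_queue_delete();            /* queues the work */
    try_queue_delete();            /* already queued: extra ref dropped */
    del_work_handler();            /* pretend the workqueue runs it now */
    put_ref();                     /* drop the base reference: freed */
    return 0;
}
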
1525 struct nvmet_fc_tgt_assoc *assoc, *next; in nvmet_fc_invalidate_host() local
1530 list_for_each_entry_safe(assoc, next, in nvmet_fc_invalidate_host()
1532 if (!assoc->hostport || in nvmet_fc_invalidate_host()
1533 assoc->hostport->hosthandle != hosthandle) in nvmet_fc_invalidate_host()
1535 if (!nvmet_fc_tgt_a_get(assoc)) in nvmet_fc_invalidate_host()
1537 assoc->hostport->invalid = 1; in nvmet_fc_invalidate_host()
1539 if (!schedule_work(&assoc->del_work)) in nvmet_fc_invalidate_host()
1541 nvmet_fc_tgt_a_put(assoc); in nvmet_fc_invalidate_host()
1558 struct nvmet_fc_tgt_assoc *assoc; in nvmet_fc_delete_ctrl() local
1572 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1573 queue = assoc->queues[0]; in nvmet_fc_delete_ctrl()
1575 if (nvmet_fc_tgt_a_get(assoc)) in nvmet_fc_delete_ctrl()
1585 if (!schedule_work(&assoc->del_work)) in nvmet_fc_delete_ctrl()
1587 nvmet_fc_tgt_a_put(assoc); in nvmet_fc_delete_ctrl()
1671 iod->assoc = nvmet_fc_alloc_target_assoc( in nvmet_fc_ls_create_association()
1673 if (!iod->assoc) in nvmet_fc_ls_create_association()
1676 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, in nvmet_fc_ls_create_association()
1700 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1715 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); in nvmet_fc_ls_create_association()
1760 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1762 if (!iod->assoc) in nvmet_fc_ls_create_connection()
1765 queue = nvmet_fc_alloc_target_queue(iod->assoc, in nvmet_fc_ls_create_connection()
1772 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_connection()
1805 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, in nvmet_fc_ls_create_connection()
1821 struct nvmet_fc_tgt_assoc *assoc = NULL; in nvmet_fc_ls_disconnect() local
1831 assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_disconnect()
1833 iod->assoc = assoc; in nvmet_fc_ls_disconnect()
1834 if (!assoc) in nvmet_fc_ls_disconnect()
1838 if (ret || !assoc) { in nvmet_fc_ls_disconnect()
1861 nvmet_fc_tgt_a_put(assoc); in nvmet_fc_ls_disconnect()
1873 oldls = assoc->rcv_disconn; in nvmet_fc_ls_disconnect()
1874 assoc->rcv_disconn = iod; in nvmet_fc_ls_disconnect()
1877 nvmet_fc_delete_target_assoc(assoc); in nvmet_fc_ls_disconnect()
1883 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
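
The Disconnect LS handling above keeps at most one received Disconnect request stashed on the association: the new iod is swapped into assoc->rcv_disconn and whatever was stashed before (oldls) is pulled out, presumably so it can be finished off outside the lock, with nvmet_fc_target_assoc_free() (earlier in the listing) picking up any still-stashed request at final teardown. A small sketch of that swap-now, finish-later pattern; the types are illustrative and a pthread mutex stands in for the driver's lock:

#include <pthread.h>
#include <stdio.h>

struct ls_iod { const char *tag; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ls_iod *rcv_disconn;  /* only the newest Disconnect is kept */

static void finish_ls(struct ls_iod *iod)
{
    if (iod)
        printf("completing displaced LS: %s\n", iod->tag);
}

static void handle_disconnect(struct ls_iod *iod)
{
    struct ls_iod *oldls;

    pthread_mutex_lock(&lock);
    oldls = rcv_disconn;    /* remember whatever was stashed before */
    rcv_disconn = iod;      /* keep only the newest request */
    pthread_mutex_unlock(&lock);

    finish_ls(oldls);       /* never complete an LS while holding the lock */
}

int main(void)
{
    struct ls_iod a = { "first disconnect" }, b = { "second disconnect" };

    handle_disconnect(&a);  /* nothing displaced yet */
    handle_disconnect(&b);  /* displaces and completes "first disconnect" */
    return 0;
}
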
1948 iod->assoc = NULL; in nvmet_fc_handle_ls_rqst()