Lines matching refs:lu (references to the identifier lu)
155 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) in sbp2_queue_work() argument
157 queue_delayed_work(fw_workqueue, &lu->work, delay); in sbp2_queue_work()
192 static const struct device *lu_dev(const struct sbp2_logical_unit *lu) in lu_dev() argument
194 return &lu->tgt->unit->device; in lu_dev()
274 struct sbp2_logical_unit *lu; member
423 struct sbp2_logical_unit *lu = callback_data; in sbp2_status_write() local
441 dev_notice(lu_dev(lu), in sbp2_status_write()
448 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_status_write()
449 list_for_each_entry(orb, &lu->orb_list, link) { in sbp2_status_write()
457 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_status_write()
459 if (&orb->link != &lu->orb_list) { in sbp2_status_write()
463 dev_err(lu_dev(lu), "status write for unknown ORB\n"); in sbp2_status_write()
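
The sbp2_status_write() references above show the lookup idiom the driver uses to match an incoming status block to a pending ORB: walk lu->orb_list under tgt->lock, and afterwards detect a miss by checking whether &orb->link ended up pointing back at the list head (the "status write for unknown ORB" case). A minimal userspace sketch of that idiom follows; every *_demo name and the tiny circular list are invented for the illustration, the driver itself uses <linux/list.h>.

#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for <linux/list.h>; nothing below is driver code. */
#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head_demo {
	struct list_head_demo *next, *prev;
};

static void list_init_demo(struct list_head_demo *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add_tail_demo(struct list_head_demo *new, struct list_head_demo *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct orb_demo {
	struct list_head_demo link;
	unsigned long long request_bus;	/* bus address a status block refers to */
};

/* Walk the pending list; "not found" is detected the way the driver does it:
 * the cursor ran all the way around and points at the list head again. */
static struct orb_demo *match_status_demo(struct list_head_demo *orb_list,
					  unsigned long long status_addr)
{
	struct list_head_demo *pos;

	for (pos = orb_list->next; pos != orb_list; pos = pos->next) {
		struct orb_demo *orb = container_of_demo(pos, struct orb_demo, link);

		if (orb->request_bus == status_addr)
			return orb;
	}
	return NULL;	/* the "status write for unknown ORB" case */
}

int main(void)
{
	struct list_head_demo orb_list;
	struct orb_demo a = { .request_bus = 0x1000 };
	struct orb_demo b = { .request_bus = 0x2000 };

	list_init_demo(&orb_list);
	list_add_tail_demo(&a.link, &orb_list);
	list_add_tail_demo(&b.link, &orb_list);

	printf("0x2000 -> %p\n", (void *)match_status_demo(&orb_list, 0x2000));
	printf("0x3000 -> %p\n", (void *)match_status_demo(&orb_list, 0x3000));
	return 0;
}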
484 spin_lock_irqsave(&orb->lu->tgt->lock, flags); in complete_transaction()
490 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
495 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
501 static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, in sbp2_send_orb() argument
504 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_orb()
511 orb->lu = lu; in sbp2_send_orb()
512 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_send_orb()
513 list_add_tail(&orb->link, &lu->orb_list); in sbp2_send_orb()
514 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_send_orb()
524 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) in sbp2_cancel_orbs() argument
526 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_cancel_orbs()
532 spin_lock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
533 list_splice_init(&lu->orb_list, &list); in sbp2_cancel_orbs()
534 spin_unlock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
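
The sbp2_cancel_orbs() references above show the driver detaching every pending ORB from lu->orb_list in one step while tgt->lock is held (list_splice_init() onto a local list) and only completing the orphaned ORBs after the lock is dropped. Below is a self-contained userspace sketch of that detach-under-lock pattern, with a pthread mutex standing in for tgt->lock; all *_demo names are invented for the illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented types for the sketch; not part of the sbp2 driver. */
struct orb_demo {
	struct orb_demo *next;
	int id;
};

struct lu_demo {
	pthread_mutex_t lock;		/* stands in for lu->tgt->lock */
	struct orb_demo *pending;	/* stands in for lu->orb_list */
};

static int cancel_orbs_demo(struct lu_demo *lu)
{
	struct orb_demo *list, *orb;
	int cancelled = 0;

	/* Detach the whole pending list in O(1) while holding the lock,
	 * mirroring what list_splice_init() does in sbp2_cancel_orbs(). */
	pthread_mutex_lock(&lu->lock);
	list = lu->pending;
	lu->pending = NULL;
	pthread_mutex_unlock(&lu->lock);

	/* Complete the orphaned entries without the lock held. */
	while ((orb = list) != NULL) {
		list = orb->next;
		printf("cancelling ORB %d\n", orb->id);
		free(orb);
		cancelled++;
	}
	return cancelled;
}

int main(void)
{
	struct lu_demo lu = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = NULL };
	int i;

	for (i = 0; i < 3; i++) {
		struct orb_demo *orb = malloc(sizeof(*orb));

		if (!orb)
			break;
		orb->id = i;
		orb->next = lu.pending;
		lu.pending = orb;
	}
	printf("cancelled %d ORBs\n", cancel_orbs_demo(&lu));
	return 0;
}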
560 static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, in sbp2_send_management_orb() argument
564 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_management_orb()
594 cpu_to_be32(lu->address_handler.offset >> 32); in sbp2_send_management_orb()
596 cpu_to_be32(lu->address_handler.offset); in sbp2_send_management_orb()
603 timeout = lu->tgt->mgt_orb_timeout; in sbp2_send_management_orb()
617 sbp2_send_orb(&orb->base, lu, node_id, generation, in sbp2_send_management_orb()
618 lu->tgt->management_agent_address); in sbp2_send_management_orb()
623 if (sbp2_cancel_orbs(lu) == 0) { in sbp2_send_management_orb()
624 dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n", in sbp2_send_management_orb()
630 dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n", in sbp2_send_management_orb()
637 dev_err(lu_dev(lu), "error status: %d:%d\n", in sbp2_send_management_orb()
658 static void sbp2_agent_reset(struct sbp2_logical_unit *lu) in sbp2_agent_reset() argument
660 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset()
664 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset()
665 lu->command_block_agent_address + SBP2_AGENT_RESET, in sbp2_agent_reset()
675 static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) in sbp2_agent_reset_no_wait() argument
677 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset_no_wait()
686 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset_no_wait()
687 lu->command_block_agent_address + SBP2_AGENT_RESET, in sbp2_agent_reset_no_wait()
708 static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) in sbp2_conditionally_block() argument
710 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_block()
717 if (!tgt->dont_block && !lu->blocked && in sbp2_conditionally_block()
718 lu->generation != card->generation) { in sbp2_conditionally_block()
719 lu->blocked = true; in sbp2_conditionally_block()
732 static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) in sbp2_conditionally_unblock() argument
734 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_unblock()
741 if (lu->blocked && lu->generation == card->generation) { in sbp2_conditionally_unblock()
742 lu->blocked = false; in sbp2_conditionally_unblock()
796 static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) in sbp2_set_busy_timeout() argument
798 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_set_busy_timeout()
802 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_set_busy_timeout()
810 struct sbp2_logical_unit *lu = in sbp2_login() local
812 struct sbp2_target *tgt = lu->tgt; in sbp2_login()
828 if (lu->has_sdev) in sbp2_login()
829 sbp2_send_management_orb(lu, device->node_id, generation, in sbp2_login()
830 SBP2_LOGOUT_REQUEST, lu->login_id, NULL); in sbp2_login()
832 if (sbp2_send_management_orb(lu, node_id, generation, in sbp2_login()
833 SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { in sbp2_login()
834 if (lu->retries++ < 5) { in sbp2_login()
835 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); in sbp2_login()
838 lu->lun); in sbp2_login()
840 sbp2_unblock(lu->tgt); in sbp2_login()
848 lu->generation = generation; in sbp2_login()
850 lu->command_block_agent_address = in sbp2_login()
853 lu->login_id = be32_to_cpu(response.misc) & 0xffff; in sbp2_login()
856 lu->lun, lu->retries); in sbp2_login()
859 sbp2_set_busy_timeout(lu); in sbp2_login()
861 lu->workfn = sbp2_reconnect; in sbp2_login()
862 sbp2_agent_reset(lu); in sbp2_login()
865 if (lu->has_sdev) { in sbp2_login()
866 sbp2_cancel_orbs(lu); in sbp2_login()
867 sbp2_conditionally_unblock(lu); in sbp2_login()
872 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) in sbp2_login()
876 sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); in sbp2_login()
898 lu->has_sdev = true; in sbp2_login()
909 sbp2_send_management_orb(lu, device->node_id, generation, in sbp2_login()
910 SBP2_LOGOUT_REQUEST, lu->login_id, NULL); in sbp2_login()
915 lu->workfn = sbp2_login; in sbp2_login()
920 struct sbp2_logical_unit *lu = in sbp2_reconnect() local
922 struct sbp2_target *tgt = lu->tgt; in sbp2_reconnect()
934 if (sbp2_send_management_orb(lu, node_id, generation, in sbp2_reconnect()
936 lu->login_id, NULL) < 0) { in sbp2_reconnect()
946 lu->retries++ >= 5) { in sbp2_reconnect()
948 lu->retries = 0; in sbp2_reconnect()
949 lu->workfn = sbp2_login; in sbp2_reconnect()
951 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); in sbp2_reconnect()
959 lu->generation = generation; in sbp2_reconnect()
962 lu->lun, lu->retries); in sbp2_reconnect()
964 sbp2_agent_reset(lu); in sbp2_reconnect()
965 sbp2_cancel_orbs(lu); in sbp2_reconnect()
966 sbp2_conditionally_unblock(lu); in sbp2_reconnect()
971 struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), in sbp2_lu_workfn() local
973 lu->workfn(work); in sbp2_lu_workfn()
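
sbp2_lu_workfn() recovers the enclosing logical unit from the embedded delayed work item via container_of(to_delayed_work(work), ...) and forwards the call through lu->workfn, which the references above show being repointed between sbp2_login and sbp2_reconnect as the unit's state changes. Below is a userspace sketch of that dispatch idiom with no real workqueue behind it; all *_demo names are invented for the illustration.

#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins; nothing here is driver or kernel code. */
#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_demo {
	int dummy;
};

struct lu_demo {
	const char *name;
	void (*workfn)(struct work_demo *work);	/* stands in for lu->workfn */
	struct work_demo work;			/* stands in for lu->work */
};

static void reconnect_demo(struct work_demo *work)
{
	struct lu_demo *lu = container_of_demo(work, struct lu_demo, work);

	printf("%s: reconnect\n", lu->name);
}

static void login_demo(struct work_demo *work)
{
	struct lu_demo *lu = container_of_demo(work, struct lu_demo, work);

	printf("%s: login\n", lu->name);
	/* After a successful login the driver repoints lu->workfn at
	 * sbp2_reconnect; the sketch does the same. */
	lu->workfn = reconnect_demo;
}

/* The one function a workqueue would actually invoke: recover the enclosing
 * logical unit from the embedded work item, then dispatch. */
static void lu_workfn_demo(struct work_demo *work)
{
	struct lu_demo *lu = container_of_demo(work, struct lu_demo, work);

	lu->workfn(work);
}

int main(void)
{
	struct lu_demo lu = { .name = "lu0", .workfn = login_demo };

	lu_workfn_demo(&lu.work);	/* runs login_demo */
	lu_workfn_demo(&lu.work);	/* now runs reconnect_demo */
	return 0;
}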
978 struct sbp2_logical_unit *lu; in sbp2_add_logical_unit() local
980 lu = kmalloc(sizeof(*lu), GFP_KERNEL); in sbp2_add_logical_unit()
981 if (!lu) in sbp2_add_logical_unit()
984 lu->address_handler.length = 0x100; in sbp2_add_logical_unit()
985 lu->address_handler.address_callback = sbp2_status_write; in sbp2_add_logical_unit()
986 lu->address_handler.callback_data = lu; in sbp2_add_logical_unit()
988 if (fw_core_add_address_handler(&lu->address_handler, in sbp2_add_logical_unit()
990 kfree(lu); in sbp2_add_logical_unit()
994 lu->tgt = tgt; in sbp2_add_logical_unit()
995 lu->lun = lun_entry & 0xffff; in sbp2_add_logical_unit()
996 lu->login_id = INVALID_LOGIN_ID; in sbp2_add_logical_unit()
997 lu->retries = 0; in sbp2_add_logical_unit()
998 lu->has_sdev = false; in sbp2_add_logical_unit()
999 lu->blocked = false; in sbp2_add_logical_unit()
1001 INIT_LIST_HEAD(&lu->orb_list); in sbp2_add_logical_unit()
1002 lu->workfn = sbp2_login; in sbp2_add_logical_unit()
1003 INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); in sbp2_add_logical_unit()
1005 list_add_tail(&lu->link, &tgt->lu_list); in sbp2_add_logical_unit()
1139 struct sbp2_logical_unit *lu; in sbp2_probe() local
1195 list_for_each_entry(lu, &tgt->lu_list, link) in sbp2_probe()
1196 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); in sbp2_probe()
1212 struct sbp2_logical_unit *lu; in sbp2_update() local
1220 list_for_each_entry(lu, &tgt->lu_list, link) { in sbp2_update()
1221 sbp2_conditionally_block(lu); in sbp2_update()
1222 lu->retries = 0; in sbp2_update()
1223 sbp2_queue_work(lu, 0); in sbp2_update()
1231 struct sbp2_logical_unit *lu, *next; in sbp2_remove() local
1239 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { in sbp2_remove()
1240 cancel_delayed_work_sync(&lu->work); in sbp2_remove()
1241 sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); in sbp2_remove()
1246 if (lu->login_id != INVALID_LOGIN_ID) { in sbp2_remove()
1256 sbp2_send_management_orb(lu, node_id, generation, in sbp2_remove()
1258 lu->login_id, NULL); in sbp2_remove()
1260 fw_core_remove_address_handler(&lu->address_handler); in sbp2_remove()
1261 list_del(&lu->link); in sbp2_remove()
1262 kfree(lu); in sbp2_remove()
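
The sbp2_add_logical_unit() and sbp2_remove() references show mirrored ordering: the status-FIFO address handler is registered (and the unit freed again if that fails) before the unit is linked onto tgt->lu_list, and on removal the delayed work is cancelled synchronously, a logout is sent only if lu->login_id ever became valid, and the handler is removed before the unit is unlinked and freed. The sketch below mirrors that ordering in plain userspace C; the *_demo names and the stub register/unregister helpers are invented stand-ins for fw_core_add_address_handler()/fw_core_remove_address_handler(), and the synchronous work cancellation is left out.

#include <stdio.h>
#include <stdlib.h>

struct lu_demo {
	struct lu_demo *next;
	int login_id;			/* -1 plays the role of INVALID_LOGIN_ID */
	int handler_registered;
};

static int register_handler_demo(struct lu_demo *lu)
{
	lu->handler_registered = 1;	/* pretend the status FIFO address was claimed */
	return 0;
}

static void unregister_handler_demo(struct lu_demo *lu)
{
	lu->handler_registered = 0;
}

/* Mirror of sbp2_add_logical_unit(): claim the status FIFO first, initialise
 * the fields, and only then publish the unit on the target's list. */
static struct lu_demo *add_lu_demo(struct lu_demo **lu_list)
{
	struct lu_demo *lu = malloc(sizeof(*lu));

	if (!lu)
		return NULL;
	if (register_handler_demo(lu) < 0) {
		free(lu);
		return NULL;
	}
	lu->login_id = -1;
	lu->next = *lu_list;
	*lu_list = lu;
	return lu;
}

/* Mirror of the per-unit part of sbp2_remove(): log out only if a login ever
 * succeeded, then drop the handler, unlink, and free. */
static void remove_lus_demo(struct lu_demo **lu_list)
{
	struct lu_demo *lu;

	while ((lu = *lu_list) != NULL) {
		*lu_list = lu->next;
		if (lu->login_id != -1)
			printf("logging out id %d\n", lu->login_id);
		unregister_handler_demo(lu);
		free(lu);
	}
}

int main(void)
{
	struct lu_demo *lu_list = NULL;
	struct lu_demo *lu = add_lu_demo(&lu_list);

	if (lu)
		lu->login_id = 7;	/* pretend a login succeeded */
	remove_lus_demo(&lu_list);
	return 0;
}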
1356 struct fw_device *device = target_parent_device(base_orb->lu->tgt); in complete_command_orb()
1361 sbp2_agent_reset_no_wait(base_orb->lu); in complete_command_orb()
1387 sbp2_conditionally_block(base_orb->lu); in complete_command_orb()
1399 struct fw_device *device, struct sbp2_logical_unit *lu) in sbp2_map_scatterlist() argument
1417 cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1443 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1461 struct sbp2_logical_unit *lu = cmd->device->hostdata; in sbp2_scsi_queuecommand() local
1462 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_scsi_queuecommand()
1476 COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | in sbp2_scsi_queuecommand()
1486 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) in sbp2_scsi_queuecommand()
1500 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, in sbp2_scsi_queuecommand()
1501 lu->command_block_agent_address + SBP2_ORB_POINTER); in sbp2_scsi_queuecommand()
1510 struct sbp2_logical_unit *lu = sdev->hostdata; in sbp2_scsi_slave_alloc() local
1513 if (!lu) in sbp2_scsi_slave_alloc()
1524 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) in sbp2_scsi_slave_alloc()
1532 struct sbp2_logical_unit *lu = sdev->hostdata; in sbp2_scsi_slave_configure() local
1543 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) in sbp2_scsi_slave_configure()
1546 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) in sbp2_scsi_slave_configure()
1549 if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) in sbp2_scsi_slave_configure()
1552 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) in sbp2_scsi_slave_configure()
1564 struct sbp2_logical_unit *lu = cmd->device->hostdata; in sbp2_scsi_abort() local
1566 dev_notice(lu_dev(lu), "sbp2_scsi_abort\n"); in sbp2_scsi_abort()
1567 sbp2_agent_reset(lu); in sbp2_scsi_abort()
1568 sbp2_cancel_orbs(lu); in sbp2_scsi_abort()
1584 struct sbp2_logical_unit *lu; in sbp2_sysfs_ieee1394_id_show() local
1589 lu = sdev->hostdata; in sbp2_sysfs_ieee1394_id_show()
1592 (unsigned long long)lu->tgt->guid, in sbp2_sysfs_ieee1394_id_show()
1593 lu->tgt->directory_id, lu->lun); in sbp2_sysfs_ieee1394_id_show()