Lines matching refs:rport
124 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_srp_rport_id() local
125 return sprintf(buf, "%16phC\n", rport->port_id); in show_srp_rport_id()
142 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_srp_rport_roles() local
147 if (srp_rport_role_names[i].value == rport->roles) { in show_srp_rport_roles()
160 struct srp_rport *rport = transport_class_to_srp_rport(dev); in store_srp_rport_delete() local
165 i->f->rport_delete(rport); in store_srp_rport_delete()
184 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_srp_rport_state() local
185 enum srp_rport_state state = rport->state; in show_srp_rport_state()
215 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_reconnect_delay() local
217 return srp_show_tmo(buf, rport->reconnect_delay); in show_reconnect_delay()
224 struct srp_rport *rport = transport_class_to_srp_rport(dev); in store_reconnect_delay() local
230 res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, in store_reconnect_delay()
231 rport->dev_loss_tmo); in store_reconnect_delay()
235 if (rport->reconnect_delay <= 0 && delay > 0 && in store_reconnect_delay()
236 rport->state != SRP_RPORT_RUNNING) { in store_reconnect_delay()
237 queue_delayed_work(system_long_wq, &rport->reconnect_work, in store_reconnect_delay()
240 cancel_delayed_work(&rport->reconnect_work); in store_reconnect_delay()
242 rport->reconnect_delay = delay; in store_reconnect_delay()
255 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_failed_reconnects() local
257 return sprintf(buf, "%d\n", rport->failed_reconnects); in show_failed_reconnects()
266 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_srp_rport_fast_io_fail_tmo() local
268 return srp_show_tmo(buf, rport->fast_io_fail_tmo); in show_srp_rport_fast_io_fail_tmo()
275 struct srp_rport *rport = transport_class_to_srp_rport(dev); in store_srp_rport_fast_io_fail_tmo() local
282 res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, in store_srp_rport_fast_io_fail_tmo()
283 rport->dev_loss_tmo); in store_srp_rport_fast_io_fail_tmo()
286 rport->fast_io_fail_tmo = fast_io_fail_tmo; in store_srp_rport_fast_io_fail_tmo()
301 struct srp_rport *rport = transport_class_to_srp_rport(dev); in show_srp_rport_dev_loss_tmo() local
303 return srp_show_tmo(buf, rport->dev_loss_tmo); in show_srp_rport_dev_loss_tmo()
310 struct srp_rport *rport = transport_class_to_srp_rport(dev); in store_srp_rport_dev_loss_tmo() local
317 res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, in store_srp_rport_dev_loss_tmo()
321 rport->dev_loss_tmo = dev_loss_tmo; in store_srp_rport_dev_loss_tmo()
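All three store handlers above validate the new value with srp_tmo_valid(), which rejects inconsistent combinations (for instance a fast_io_fail_tmo that does not stay below dev_loss_tmo when both are enabled). An initiator driver can reuse the exported helper to sanity-check its own defaults; the sketch below is hypothetical (the my_* parameters are made up) and only srp_tmo_valid() itself comes from the transport class.

#include <scsi/scsi_transport_srp.h>

/* Hypothetical module-wide defaults mirroring the per-rport sysfs knobs. */
static int my_reconnect_delay = 10;	/* seconds between reconnect attempts */
static int my_fast_io_fail_tmo = 15;	/* seconds until blocked I/O is failed */
static int my_dev_loss_tmo = 600;	/* seconds until the rport is removed */

/*
 * Reuse the transport-class validator so the driver defaults obey the same
 * constraints the sysfs store handlers enforce.
 */
static int my_validate_tmo_defaults(void)
{
	return srp_tmo_valid(my_reconnect_delay, my_fast_io_fail_tmo,
			     my_dev_loss_tmo);
}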
332 static int srp_rport_set_state(struct srp_rport *rport, in srp_rport_set_state() argument
335 enum srp_rport_state old_state = rport->state; in srp_rport_set_state()
337 lockdep_assert_held(&rport->mutex); in srp_rport_set_state()
367 rport->state = new_state; in srp_rport_set_state()
380 struct srp_rport *rport = container_of(to_delayed_work(work), in srp_reconnect_work() local
382 struct Scsi_Host *shost = rport_to_shost(rport); in srp_reconnect_work()
385 res = srp_reconnect_rport(rport); in srp_reconnect_work()
389 ++rport->failed_reconnects, res); in srp_reconnect_work()
390 delay = rport->reconnect_delay * in srp_reconnect_work()
391 min(100, max(1, rport->failed_reconnects - 10)); in srp_reconnect_work()
394 &rport->reconnect_work, delay * HZ); in srp_reconnect_work()
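The backoff at lines 390-391 is linear and capped: with reconnect_delay = 10 s, the first eleven failed attempts are retried every 10 s (the max(1, ...) clamp), the twelfth failure waits 10 * (12 - 10) = 20 s, and the factor saturates at 100, i.e. 1000 s between attempts once failed_reconnects reaches 110.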
402 static void __rport_fail_io_fast(struct srp_rport *rport) in __rport_fail_io_fast() argument
404 struct Scsi_Host *shost = rport_to_shost(rport); in __rport_fail_io_fast()
407 lockdep_assert_held(&rport->mutex); in __rport_fail_io_fast()
409 if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) in __rport_fail_io_fast()
412 scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); in __rport_fail_io_fast()
417 i->f->terminate_rport_io(rport); in __rport_fail_io_fast()
426 struct srp_rport *rport = container_of(to_delayed_work(work), in rport_fast_io_fail_timedout() local
428 struct Scsi_Host *shost = rport_to_shost(rport); in rport_fast_io_fail_timedout()
431 dev_name(&rport->dev), dev_name(&shost->shost_gendev)); in rport_fast_io_fail_timedout()
433 mutex_lock(&rport->mutex); in rport_fast_io_fail_timedout()
434 if (rport->state == SRP_RPORT_BLOCKED) in rport_fast_io_fail_timedout()
435 __rport_fail_io_fast(rport); in rport_fast_io_fail_timedout()
436 mutex_unlock(&rport->mutex); in rport_fast_io_fail_timedout()
445 struct srp_rport *rport = container_of(to_delayed_work(work), in rport_dev_loss_timedout() local
447 struct Scsi_Host *shost = rport_to_shost(rport); in rport_dev_loss_timedout()
451 dev_name(&rport->dev), dev_name(&shost->shost_gendev)); in rport_dev_loss_timedout()
453 mutex_lock(&rport->mutex); in rport_dev_loss_timedout()
454 WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); in rport_dev_loss_timedout()
455 scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); in rport_dev_loss_timedout()
456 mutex_unlock(&rport->mutex); in rport_dev_loss_timedout()
458 i->f->rport_delete(rport); in rport_dev_loss_timedout()
461 static void __srp_start_tl_fail_timers(struct srp_rport *rport) in __srp_start_tl_fail_timers() argument
463 struct Scsi_Host *shost = rport_to_shost(rport); in __srp_start_tl_fail_timers()
466 lockdep_assert_held(&rport->mutex); in __srp_start_tl_fail_timers()
468 delay = rport->reconnect_delay; in __srp_start_tl_fail_timers()
469 fast_io_fail_tmo = rport->fast_io_fail_tmo; in __srp_start_tl_fail_timers()
470 dev_loss_tmo = rport->dev_loss_tmo; in __srp_start_tl_fail_timers()
472 rport->state); in __srp_start_tl_fail_timers()
474 if (rport->state == SRP_RPORT_LOST) in __srp_start_tl_fail_timers()
477 queue_delayed_work(system_long_wq, &rport->reconnect_work, in __srp_start_tl_fail_timers()
480 srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { in __srp_start_tl_fail_timers()
482 rport->state); in __srp_start_tl_fail_timers()
486 &rport->fast_io_fail_work, in __srp_start_tl_fail_timers()
490 &rport->dev_loss_work, in __srp_start_tl_fail_timers()
502 void srp_start_tl_fail_timers(struct srp_rport *rport) in srp_start_tl_fail_timers() argument
504 mutex_lock(&rport->mutex); in srp_start_tl_fail_timers()
505 __srp_start_tl_fail_timers(rport); in srp_start_tl_fail_timers()
506 mutex_unlock(&rport->mutex); in srp_start_tl_fail_timers()
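srp_start_tl_fail_timers() is what an initiator driver calls once it has detected a transport failure; under rport->mutex the core moves the rport to SRP_RPORT_BLOCKED and arms the reconnect, fast_io_fail and dev_loss delayed work items shown above. A minimal, hypothetical error-path sketch (struct my_srp_conn and my_conn_error() are made-up names):

#include <scsi/scsi_transport_srp.h>

/* Hypothetical per-connection state kept by an SRP initiator driver. */
struct my_srp_conn {
	struct srp_rport *rport;
};

/*
 * Called from the driver's CM/completion error path once the connection is
 * known to be broken.  The transport class blocks the SCSI target and starts
 * the reconnect, fast_io_fail and dev_loss timers on the driver's behalf.
 */
static void my_conn_error(struct my_srp_conn *conn)
{
	srp_start_tl_fail_timers(conn->rport);
}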
532 int srp_reconnect_rport(struct srp_rport *rport) in srp_reconnect_rport() argument
534 struct Scsi_Host *shost = rport_to_shost(rport); in srp_reconnect_rport()
541 res = mutex_lock_interruptible(&rport->mutex); in srp_reconnect_rport()
544 if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) in srp_reconnect_rport()
552 res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; in srp_reconnect_rport()
554 dev_name(&shost->shost_gendev), rport->state, res); in srp_reconnect_rport()
556 cancel_delayed_work(&rport->fast_io_fail_work); in srp_reconnect_rport()
557 cancel_delayed_work(&rport->dev_loss_work); in srp_reconnect_rport()
559 rport->failed_reconnects = 0; in srp_reconnect_rport()
560 srp_rport_set_state(rport, SRP_RPORT_RUNNING); in srp_reconnect_rport()
573 } else if (rport->state == SRP_RPORT_RUNNING) { in srp_reconnect_rport()
579 __rport_fail_io_fast(rport); in srp_reconnect_rport()
580 __srp_start_tl_fail_timers(rport); in srp_reconnect_rport()
581 } else if (rport->state != SRP_RPORT_BLOCKED) { in srp_reconnect_rport()
585 mutex_unlock(&rport->mutex); in srp_reconnect_rport()
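srp_reconnect_rport() serializes on rport->mutex, invokes the driver's .reconnect callback and, on success, cancels the failure timers, clears failed_reconnects and returns the rport to SRP_RPORT_RUNNING (lines 556-560); on failure it fast-fails outstanding I/O and restarts the timers. It also makes a convenient SCSI host-reset handler. In the hedged sketch below only srp_reconnect_rport(), shost_priv(), SUCCESS and FAILED are kernel symbols; struct my_srp_host and my_reestablish_connection() are hypothetical.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

/* Hypothetical host-private data holding the rport created at probe time. */
struct my_srp_host {
	struct srp_rport *rport;
};

/* Hypothetical: tear down and rebuild the RDMA/SRP connection. */
static int my_reestablish_connection(struct srp_rport *rport);

/* .reconnect callback in struct srp_function_template: return 0 only when
 * the new connection is usable again. */
static int my_rport_reconnect(struct srp_rport *rport)
{
	return my_reestablish_connection(rport);
}

/* .eh_host_reset_handler: let the transport class coordinate the reconnect. */
static int my_eh_host_reset(struct scsi_cmnd *scmnd)
{
	struct my_srp_host *host = shost_priv(scmnd->device->host);

	return srp_reconnect_rport(host->rport) == 0 ? SUCCESS : FAILED;
}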
608 struct srp_rport *rport = shost_to_rport(shost); in srp_timed_out() local
611 return rport && rport->fast_io_fail_tmo < 0 && in srp_timed_out()
612 rport->dev_loss_tmo < 0 && in srp_timed_out()
620 struct srp_rport *rport = dev_to_rport(dev); in srp_rport_release() local
623 kfree(rport); in srp_rport_release()
672 void srp_rport_get(struct srp_rport *rport) in srp_rport_get() argument
674 get_device(&rport->dev); in srp_rport_get()
682 void srp_rport_put(struct srp_rport *rport) in srp_rport_put() argument
684 put_device(&rport->dev); in srp_rport_put()
698 struct srp_rport *rport; in srp_rport_add() local
703 rport = kzalloc(sizeof(*rport), GFP_KERNEL); in srp_rport_add()
704 if (!rport) in srp_rport_add()
707 mutex_init(&rport->mutex); in srp_rport_add()
709 device_initialize(&rport->dev); in srp_rport_add()
711 rport->dev.parent = get_device(parent); in srp_rport_add()
712 rport->dev.release = srp_rport_release; in srp_rport_add()
714 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); in srp_rport_add()
715 rport->roles = ids->roles; in srp_rport_add()
718 rport->reconnect_delay = i->f->reconnect_delay ? in srp_rport_add()
720 INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); in srp_rport_add()
721 rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? in srp_rport_add()
723 rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60; in srp_rport_add()
724 INIT_DELAYED_WORK(&rport->fast_io_fail_work, in srp_rport_add()
726 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); in srp_rport_add()
729 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); in srp_rport_add()
731 transport_setup_device(&rport->dev); in srp_rport_add()
733 ret = device_add(&rport->dev); in srp_rport_add()
735 transport_destroy_device(&rport->dev); in srp_rport_add()
736 put_device(&rport->dev); in srp_rport_add()
740 transport_add_device(&rport->dev); in srp_rport_add()
741 transport_configure_device(&rport->dev); in srp_rport_add()
743 return rport; in srp_rport_add()
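srp_rport_add() allocates the remote-port device, seeds reconnect_delay, fast_io_fail_tmo and dev_loss_tmo from the optional pointers in the driver's srp_function_template (lines 718-723, with dev_loss_tmo defaulting to 60 s) and registers it below the SCSI host. The sketch below uses the real srp_attach_transport()/srp_rport_add() API, but everything prefixed my_ is hypothetical and the callback bodies are omitted:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

static int my_reconnect_delay = 10;
static int my_fast_io_fail_tmo = 15;
static int my_dev_loss_tmo = 600;

/* Transport-class callbacks; bodies are driver specific and omitted. */
static int my_rport_reconnect(struct srp_rport *rport);
static void my_rport_terminate_io(struct srp_rport *rport);
static void my_rport_delete(struct srp_rport *rport);

static struct srp_function_template my_ft = {
	.has_rport_state	= true,
	.reset_timer_if_blocked	= true,
	.reconnect_delay	= &my_reconnect_delay,
	.fast_io_fail_tmo	= &my_fast_io_fail_tmo,
	.dev_loss_tmo		= &my_dev_loss_tmo,
	.reconnect		= my_rport_reconnect,
	.terminate_rport_io	= my_rport_terminate_io,
	.rport_delete		= my_rport_delete,
};

/*
 * At module init: my_transport = srp_attach_transport(&my_ft); the resulting
 * template must be assigned to shost->transportt before scsi_add_host().
 */

/* Register one remote port below an already-added Scsi_Host. */
static struct srp_rport *my_create_rport(struct Scsi_Host *shost,
					 const u8 *port_guid /* 16 bytes */)
{
	struct srp_rport_identifiers ids;

	memcpy(ids.port_id, port_guid, sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;

	/* Returns ERR_PTR() on failure, e.g. -ENOMEM. */
	return srp_rport_add(shost, &ids);
}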
753 void srp_rport_del(struct srp_rport *rport) in srp_rport_del() argument
755 struct device *dev = &rport->dev; in srp_rport_del()
793 void srp_stop_rport_timers(struct srp_rport *rport) in srp_stop_rport_timers() argument
795 mutex_lock(&rport->mutex); in srp_stop_rport_timers()
796 if (rport->state == SRP_RPORT_BLOCKED) in srp_stop_rport_timers()
797 __rport_fail_io_fast(rport); in srp_stop_rport_timers()
798 srp_rport_set_state(rport, SRP_RPORT_LOST); in srp_stop_rport_timers()
799 mutex_unlock(&rport->mutex); in srp_stop_rport_timers()
801 cancel_delayed_work_sync(&rport->reconnect_work); in srp_stop_rport_timers()
802 cancel_delayed_work_sync(&rport->fast_io_fail_work); in srp_stop_rport_timers()
803 cancel_delayed_work_sync(&rport->dev_loss_work); in srp_stop_rport_timers()
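srp_stop_rport_timers() moves the rport to SRP_RPORT_LOST and synchronously cancels all three work items, so it must run once no new transport failures can be reported, and the rport has to stay alive across SCSI host removal; that is what the srp_rport_get()/srp_rport_put() pair at lines 672-684 is for. A hedged teardown sketch (struct my_srp_host is hypothetical), roughly following the ordering used by the in-tree ib_srp initiator:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>

/* Hypothetical host-private data created at probe time. */
struct my_srp_host {
	struct Scsi_Host *shost;
	struct srp_rport *rport;
};

static void my_remove_host(struct my_srp_host *host)
{
	/* Keep the rport alive while the SCSI host is being torn down. */
	srp_rport_get(host->rport);

	srp_remove_host(host->shost);	/* srp_rport_del() on every child rport */
	scsi_remove_host(host->shost);

	/* No further state changes possible: cancel all timer work safely. */
	srp_stop_rport_timers(host->rport);

	srp_rport_put(host->rport);
	scsi_host_put(host->shost);
}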