
Lines matching refs:pg in drivers/scsi/device_handler/scsi_dh_alua.c (the SCSI ALUA device handler). Each entry gives the source line number, the matching fragment, and the containing function; "member" marks a struct member declaration.

85 	struct alua_port_group __rcu *pg;  member
104 static bool alua_rtpg_queue(struct alua_port_group *pg,
111 struct alua_port_group *pg; in release_port_group() local
113 pg = container_of(kref, struct alua_port_group, kref); in release_port_group()
114 if (pg->rtpg_sdev) in release_port_group()
115 flush_delayed_work(&pg->rtpg_work); in release_port_group()
117 list_del(&pg->node); in release_port_group()
119 kfree_rcu(pg, rcu); in release_port_group()
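
These matches show the kref release path for a port group. Below is a minimal sketch of that pattern, together with the shared declarations the later sketches reuse. The struct layout, the flag bit values, and the timeout constants are assumptions reconstructed from the matches, not copied from the driver; the lock around list_del() is likewise assumed, since those lines contain no "pg" and are absent from this listing.

	#include <linux/err.h>
	#include <linux/jiffies.h>
	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/string.h>
	#include <linux/timer.h>
	#include <linux/types.h>
	#include <linux/workqueue.h>
	#include <scsi/scsi_proto.h>	/* SCSI_ACCESS_STATE_* */

	struct scsi_device;		/* opaque in these sketches */

	/* Flag names match the listing; the bit values are assumptions. */
	#define ALUA_OPTIMIZE_STPG		0x01
	#define ALUA_RTPG_EXT_HDR_UNSUPP	0x02
	#define ALUA_PG_RUN_RTPG		0x10
	#define ALUA_PG_RUN_STPG		0x20
	#define ALUA_PG_RUNNING			0x40
	#define TPGS_MODE_IMPLICIT		0x1	/* assumption */
	#define TPGS_MODE_EXPLICIT		0x2	/* assumption */
	#define ALUA_FAILOVER_TIMEOUT		60	/* seconds; assumption */
	#define ALUA_RTPG_RETRY_DELAY		2	/* seconds; assumption */

	struct alua_port_group {
		struct kref kref;
		struct rcu_head rcu;
		struct list_head node;		/* on port_group_list */
		struct list_head dh_list;	/* attached alua_dh_data */
		struct list_head rtpg_list;	/* queued activation requests */
		struct delayed_work rtpg_work;
		struct scsi_device *rtpg_sdev;	/* device used to issue RTPG */
		spinlock_t lock;
		unsigned long expiry;		/* jiffies deadline for a transition */
		unsigned long interval;		/* requeue delay, seconds */
		unsigned int transition_tmo;	/* seconds, parsed from RTPG data */
		unsigned int flags;
		unsigned int valid_states;
		int group_id;
		int tpgs;
		int state;
		bool pref;
		int device_id_len;
		char device_id_str[256];
	};

	static LIST_HEAD(port_group_list);
	static DEFINE_SPINLOCK(port_group_lock);
	static struct workqueue_struct *kaluad_wq;

	static void alua_rtpg_work(struct work_struct *work);

	/*
	 * Release pattern from the matches above: recover the object from
	 * its kref, flush any still-queued work, unlink, and free via
	 * kfree_rcu() so concurrent RCU readers drain first.
	 */
	static void release_port_group(struct kref *kref)
	{
		struct alua_port_group *pg =
			container_of(kref, struct alua_port_group, kref);

		if (pg->rtpg_sdev)
			flush_delayed_work(&pg->rtpg_work);
		spin_lock(&port_group_lock);
		list_del(&pg->node);
		spin_unlock(&port_group_lock);
		kfree_rcu(pg, rcu);
	}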
182 struct alua_port_group *pg; in alua_find_get_pg() local
187 list_for_each_entry(pg, &port_group_list, node) { in alua_find_get_pg()
188 if (pg->group_id != group_id) in alua_find_get_pg()
190 if (!pg->device_id_len || pg->device_id_len != id_size) in alua_find_get_pg()
192 if (strncmp(pg->device_id_str, id_str, id_size)) in alua_find_get_pg()
194 if (!kref_get_unless_zero(&pg->kref)) in alua_find_get_pg()
196 return pg; in alua_find_get_pg()
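
The lookup walks the global list and returns a group only if its refcount can still be raised: kref_get_unless_zero() fails once release_port_group() has started, which closes the lookup-vs-teardown race. A sketch under the declarations above; the driver holds port_group_lock around this walk (assumed here, since the lock lines match no "pg").

	static struct alua_port_group *alua_find_get_pg(char *id_str,
							size_t id_size,
							int group_id)
	{
		struct alua_port_group *pg;

		list_for_each_entry(pg, &port_group_list, node) {
			if (pg->group_id != group_id)
				continue;
			if (!pg->device_id_len || pg->device_id_len != id_size)
				continue;
			if (strncmp(pg->device_id_str, id_str, id_size))
				continue;
			if (!kref_get_unless_zero(&pg->kref))
				continue;	/* being torn down: skip */
			return pg;
		}
		return NULL;
	}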
214 struct alua_port_group *pg, *tmp_pg; in alua_alloc_pg() local
216 pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); in alua_alloc_pg()
217 if (!pg) in alua_alloc_pg()
220 pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, in alua_alloc_pg()
221 sizeof(pg->device_id_str)); in alua_alloc_pg()
222 if (pg->device_id_len <= 0) { in alua_alloc_pg()
230 pg->device_id_str[0] = '\0'; in alua_alloc_pg()
231 pg->device_id_len = 0; in alua_alloc_pg()
233 pg->group_id = group_id; in alua_alloc_pg()
234 pg->tpgs = tpgs; in alua_alloc_pg()
235 pg->state = SCSI_ACCESS_STATE_OPTIMAL; in alua_alloc_pg()
236 pg->valid_states = TPGS_SUPPORT_ALL; in alua_alloc_pg()
238 pg->flags |= ALUA_OPTIMIZE_STPG; in alua_alloc_pg()
239 kref_init(&pg->kref); in alua_alloc_pg()
240 INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); in alua_alloc_pg()
241 INIT_LIST_HEAD(&pg->rtpg_list); in alua_alloc_pg()
242 INIT_LIST_HEAD(&pg->node); in alua_alloc_pg()
243 INIT_LIST_HEAD(&pg->dh_list); in alua_alloc_pg()
244 spin_lock_init(&pg->lock); in alua_alloc_pg()
247 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, in alua_alloc_pg()
251 kfree(pg); in alua_alloc_pg()
255 list_add(&pg->node, &port_group_list); in alua_alloc_pg()
258 return pg; in alua_alloc_pg()
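
alua_alloc_pg() initialises everything (refcount, delayed work, lists, lock) before publishing, then re-runs the lookup under the list lock: if an identical group was added concurrently, the fresh one was never visible, so plain kfree() suffices instead of kfree_rcu(). A condensed sketch; the initial state/valid_states/flags assignments shown in the matches are elided and error handling is simplified. scsi_vpd_lun_id() is the kernel helper the matches call; its prototype is repeated here for self-containedness.

	int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len);

	static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
						     int group_id, int tpgs)
	{
		struct alua_port_group *pg, *tmp_pg;

		pg = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg)
			return ERR_PTR(-ENOMEM);

		pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
						    sizeof(pg->device_id_str));
		if (pg->device_id_len <= 0) {
			/* no usable designator: fall back to a nameless group */
			pg->device_id_str[0] = '\0';
			pg->device_id_len = 0;
		}
		pg->group_id = group_id;
		pg->tpgs = tpgs;
		kref_init(&pg->kref);
		INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
		INIT_LIST_HEAD(&pg->rtpg_list);
		INIT_LIST_HEAD(&pg->node);
		INIT_LIST_HEAD(&pg->dh_list);
		spin_lock_init(&pg->lock);

		spin_lock(&port_group_lock);
		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
					  group_id);
		if (tmp_pg) {
			spin_unlock(&port_group_lock);
			kfree(pg);	/* never published: no grace period */
			return tmp_pg;	/* reuse the group that won the race */
		}
		list_add(&pg->node, &port_group_list);
		spin_unlock(&port_group_lock);

		return pg;
	}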
324 struct alua_port_group *pg, *old_pg = NULL; in alua_check_vpd() local
341 pg = alua_alloc_pg(sdev, group_id, tpgs); in alua_check_vpd()
342 if (IS_ERR(pg)) { in alua_check_vpd()
343 if (PTR_ERR(pg) == -ENOMEM) in alua_check_vpd()
347 if (pg->device_id_len) in alua_check_vpd()
350 ALUA_DH_NAME, pg->device_id_str, in alua_check_vpd()
359 old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); in alua_check_vpd()
360 if (old_pg != pg) { in alua_check_vpd()
362 if (h->pg) { in alua_check_vpd()
367 rcu_assign_pointer(h->pg, pg); in alua_check_vpd()
371 spin_lock_irqsave(&pg->lock, flags); in alua_check_vpd()
373 list_add_rcu(&h->node, &pg->dh_list); in alua_check_vpd()
374 spin_unlock_irqrestore(&pg->lock, flags); in alua_check_vpd()
376 alua_rtpg_queue(rcu_dereference_protected(h->pg, in alua_check_vpd()
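
alua_check_vpd() swaps the device's RCU-published group pointer under h->pg_lock and moves the device between the old and new groups' dh_list with the RCU list primitives. The helper below is a hypothetical name for what the driver does inline, and struct alua_dh_data is reduced to the fields these sketches need.

	struct alua_dh_data {
		struct list_head node;			/* entry on pg->dh_list */
		struct alua_port_group __rcu *pg;	/* RCU-published group */
		spinlock_t pg_lock;			/* serialises writers of ->pg */
		struct scsi_device *sdev;
	};

	static void alua_switch_pg(struct alua_dh_data *h,
				   struct alua_port_group *pg)
	{
		struct alua_port_group *old_pg;
		unsigned long flags;

		spin_lock(&h->pg_lock);
		old_pg = rcu_dereference_protected(h->pg,
						   lockdep_is_held(&h->pg_lock));
		if (old_pg != pg) {
			if (old_pg) {
				/* unhook from the previous group's list */
				spin_lock_irqsave(&old_pg->lock, flags);
				list_del_rcu(&h->node);
				spin_unlock_irqrestore(&old_pg->lock, flags);
			}
			rcu_assign_pointer(h->pg, pg);	/* publish to readers */
			spin_lock_irqsave(&pg->lock, flags);
			list_add_rcu(&h->node, &pg->dh_list);
			spin_unlock_irqrestore(&pg->lock, flags);
		}
		spin_unlock(&h->pg_lock);
	}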
413 struct alua_port_group *pg; in alua_check_sense() local
422 pg = rcu_dereference(h->pg); in alua_check_sense()
423 if (pg) in alua_check_sense()
424 pg->state = SCSI_ACCESS_STATE_TRANSITIONING; in alua_check_sense()
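
The sense handler is a pure RCU reader: it may touch h->pg only inside a read-side critical section. The rcu_read_lock()/rcu_read_unlock() pair does not appear in the matches (no "pg" on those lines) and is assumed below; the helper name is hypothetical.

	static void alua_mark_transitioning(struct alua_dh_data *h)
	{
		struct alua_port_group *pg;

		rcu_read_lock();
		pg = rcu_dereference(h->pg);
		if (pg)
			pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
		rcu_read_unlock();
	}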
514 static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) in alua_rtpg() argument
528 group_id_old = pg->group_id; in alua_rtpg()
529 state_old = pg->state; in alua_rtpg()
530 pref_old = pg->pref; in alua_rtpg()
531 valid_states_old = pg->valid_states; in alua_rtpg()
533 if (!pg->expiry) { in alua_rtpg()
536 if (pg->transition_tmo) in alua_rtpg()
537 transition_tmo = pg->transition_tmo * HZ; in alua_rtpg()
539 pg->expiry = round_jiffies_up(jiffies + transition_tmo); in alua_rtpg()
548 retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); in alua_rtpg()
560 if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) { in alua_rtpg()
589 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && in alua_rtpg()
591 pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; in alua_rtpg()
610 pg->expiry != 0 && time_before(jiffies, pg->expiry)) { in alua_rtpg()
621 pg->expiry = 0; in alua_rtpg()
636 pg->expiry = 0; in alua_rtpg()
642 orig_transition_tmo = pg->transition_tmo; in alua_rtpg()
644 pg->transition_tmo = buff[5]; in alua_rtpg()
646 pg->transition_tmo = ALUA_FAILOVER_TIMEOUT; in alua_rtpg()
648 if (orig_transition_tmo != pg->transition_tmo) { in alua_rtpg()
651 ALUA_DH_NAME, pg->transition_tmo); in alua_rtpg()
652 pg->expiry = jiffies + pg->transition_tmo * HZ; in alua_rtpg()
666 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, in alua_rtpg()
671 if ((tmp_pg == pg) || in alua_rtpg()
686 if (tmp_pg == pg) in alua_rtpg()
696 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg()
698 pg->state = SCSI_ACCESS_STATE_TRANSITIONING; in alua_rtpg()
700 if (group_id_old != pg->group_id || state_old != pg->state || in alua_rtpg()
701 pref_old != pg->pref || valid_states_old != pg->valid_states) in alua_rtpg()
704 ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), in alua_rtpg()
705 pg->pref ? "preferred" : "non-preferred", in alua_rtpg()
706 pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', in alua_rtpg()
707 pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', in alua_rtpg()
708 pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', in alua_rtpg()
709 pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', in alua_rtpg()
710 pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s', in alua_rtpg()
711 pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', in alua_rtpg()
712 pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); in alua_rtpg()
714 switch (pg->state) { in alua_rtpg()
716 if (time_before(jiffies, pg->expiry)) { in alua_rtpg()
718 pg->interval = ALUA_RTPG_RETRY_DELAY; in alua_rtpg()
725 pg->state = SCSI_ACCESS_STATE_STANDBY; in alua_rtpg()
726 pg->expiry = 0; in alua_rtpg()
728 list_for_each_entry_rcu(h, &pg->dh_list, node) { in alua_rtpg()
732 (pg->state & SCSI_ACCESS_STATE_MASK); in alua_rtpg()
733 if (pg->pref) in alua_rtpg()
743 pg->expiry = 0; in alua_rtpg()
748 pg->expiry = 0; in alua_rtpg()
751 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg()
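
alua_rtpg() arms a jiffies deadline once per transition (the device-supplied transition_tmo when the RTPG data carries one, ALUA_FAILOVER_TIMEOUT otherwise), keeps retrying while the deadline has not passed, and clears it once the state settles or time runs out. A condensed sketch of just that bookkeeping; the helper name and return convention are inventions.

	static bool alua_transition_pending(struct alua_port_group *pg,
					    bool transitioning)
	{
		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

		if (!pg->expiry) {
			if (pg->transition_tmo)
				transition_tmo = pg->transition_tmo * HZ;
			pg->expiry = round_jiffies_up(jiffies + transition_tmo);
		}

		if (transitioning && time_before(jiffies, pg->expiry)) {
			pg->interval = ALUA_RTPG_RETRY_DELAY; /* poll again soon */
			return true;	/* caller requeues the RTPG work */
		}

		pg->expiry = 0;		/* settled, or gave up at the deadline */
		return false;
	}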
764 static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) in alua_stpg() argument
769 if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { in alua_stpg()
773 switch (pg->state) { in alua_stpg()
777 if ((pg->flags & ALUA_OPTIMIZE_STPG) && in alua_stpg()
778 !pg->pref && in alua_stpg()
779 (pg->tpgs & TPGS_MODE_IMPLICIT)) in alua_stpg()
792 ALUA_DH_NAME, pg->state); in alua_stpg()
795 retval = submit_stpg(sdev, pg->group_id, &sense_hdr); in alua_stpg()
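
alua_stpg() only issues SET TARGET PORT GROUPS when it can help: the target must support explicit ALUA, the group must not already be optimal, and with ALUA_OPTIMIZE_STPG set a non-preferred path with implicit support is left for the array to promote on its own. The predicate below condenses that decision; in the driver the optimize check sits inside one arm of the switch over pg->state.

	static bool alua_should_send_stpg(const struct alua_port_group *pg)
	{
		if (!(pg->tpgs & TPGS_MODE_EXPLICIT))
			return false;	/* can only observe state via RTPG */
		if (pg->state == SCSI_ACCESS_STATE_OPTIMAL)
			return false;	/* nothing to gain */
		if ((pg->flags & ALUA_OPTIMIZE_STPG) && !pg->pref &&
		    (pg->tpgs & TPGS_MODE_IMPLICIT))
			return false;	/* let implicit ALUA handle this path */
		return true;
	}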
814 static bool alua_rtpg_select_sdev(struct alua_port_group *pg) in alua_rtpg_select_sdev() argument
819 lockdep_assert_held(&pg->lock); in alua_rtpg_select_sdev()
820 if (WARN_ON(!pg->rtpg_sdev)) in alua_rtpg_select_sdev()
828 list_for_each_entry_rcu(h, &pg->dh_list, node) { in alua_rtpg_select_sdev()
831 if (h->sdev == pg->rtpg_sdev) { in alua_rtpg_select_sdev()
835 if (rcu_dereference(h->pg) == pg && in alua_rtpg_select_sdev()
846 (pg->device_id_len ? in alua_rtpg_select_sdev()
847 (char *)pg->device_id_str : "(nameless PG)")); in alua_rtpg_select_sdev()
853 scsi_device_put(pg->rtpg_sdev); in alua_rtpg_select_sdev()
854 pg->rtpg_sdev = sdev; in alua_rtpg_select_sdev()
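
When the device carrying the RTPG work goes away, alua_rtpg_select_sdev() walks the group's dh_list under RCU for a replacement that still points back at this pg, then swaps the device references. A sketch with the scsi_device_get() step on the candidate reduced to a comment; lockdep_assert_held() documents that pg->lock is held across the swap.

	void scsi_device_put(struct scsi_device *sdev); /* <scsi/scsi_device.h> */

	static bool alua_rtpg_select_sdev_sketch(struct alua_port_group *pg)
	{
		struct alua_dh_data *h;
		struct scsi_device *sdev = NULL;

		lockdep_assert_held(&pg->lock);
		if (WARN_ON(!pg->rtpg_sdev))
			return false;

		rcu_read_lock();
		list_for_each_entry_rcu(h, &pg->dh_list, node) {
			if (h->sdev == pg->rtpg_sdev)
				continue;	/* skip the failing device */
			if (rcu_dereference(h->pg) == pg) {
				sdev = h->sdev;	/* driver also takes a ref here */
				break;
			}
		}
		rcu_read_unlock();

		if (!sdev)
			return false;	/* no usable replacement: give up */

		scsi_device_put(pg->rtpg_sdev);	/* drop the old device's ref */
		pg->rtpg_sdev = sdev;
		return true;
	}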
861 struct alua_port_group *pg = in alua_rtpg_work() local
870 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
871 sdev = pg->rtpg_sdev; in alua_rtpg_work()
873 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); in alua_rtpg_work()
874 WARN_ON(pg->flags & ALUA_PG_RUN_STPG); in alua_rtpg_work()
875 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
876 kref_put(&pg->kref, release_port_group); in alua_rtpg_work()
879 pg->flags |= ALUA_PG_RUNNING; in alua_rtpg_work()
880 if (pg->flags & ALUA_PG_RUN_RTPG) { in alua_rtpg_work()
881 int state = pg->state; in alua_rtpg_work()
883 pg->flags &= ~ALUA_PG_RUN_RTPG; in alua_rtpg_work()
884 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
887 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
888 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
889 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_work()
890 if (!pg->interval) in alua_rtpg_work()
891 pg->interval = ALUA_RTPG_RETRY_DELAY; in alua_rtpg_work()
892 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
893 queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_work()
894 pg->interval * HZ); in alua_rtpg_work()
899 err = alua_rtpg(sdev, pg); in alua_rtpg_work()
900 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
904 alua_rtpg_select_sdev(pg)) in alua_rtpg_work()
908 pg->flags & ALUA_PG_RUN_RTPG) { in alua_rtpg_work()
909 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
911 pg->interval = 0; in alua_rtpg_work()
912 else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) in alua_rtpg_work()
913 pg->interval = ALUA_RTPG_RETRY_DELAY; in alua_rtpg_work()
914 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_work()
915 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
916 queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_work()
917 pg->interval * HZ); in alua_rtpg_work()
921 pg->flags &= ~ALUA_PG_RUN_STPG; in alua_rtpg_work()
923 if (pg->flags & ALUA_PG_RUN_STPG) { in alua_rtpg_work()
924 pg->flags &= ~ALUA_PG_RUN_STPG; in alua_rtpg_work()
925 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
926 err = alua_stpg(sdev, pg); in alua_rtpg_work()
927 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
928 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { in alua_rtpg_work()
929 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_work()
930 pg->interval = 0; in alua_rtpg_work()
931 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
932 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
933 queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_work()
934 pg->interval * HZ); in alua_rtpg_work()
939 list_splice_init(&pg->rtpg_list, &qdata_list); in alua_rtpg_work()
944 list_for_each_entry(h, &pg->dh_list, node) in alua_rtpg_work()
946 pg->rtpg_sdev = NULL; in alua_rtpg_work()
947 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
955 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
956 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
957 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
959 kref_put(&pg->kref, release_port_group); in alua_rtpg_work()
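
The work function holds pg->lock for every flag update and drops it around the slow SCSI commands. If the RTPG asks for a retry, or a new request arrived while it ran (ALUA_PG_RUN_RTPG set again), it requeues itself and keeps the kref it was queued with; otherwise it clears rtpg_sdev and drops that reference. A heavily condensed sketch: the STPG leg, sdev re-selection, and completion of queued requests are elided.

	#include <scsi/scsi_dh.h>	/* SCSI_DH_OK, SCSI_DH_RETRY */

	/* Issues REPORT TARGET PORT GROUPS and updates pg; body elided. */
	static int alua_rtpg(struct scsi_device *sdev,
			     struct alua_port_group *pg);

	static void alua_rtpg_work(struct work_struct *work)
	{
		struct alua_port_group *pg =
			container_of(work, struct alua_port_group,
				     rtpg_work.work);
		struct scsi_device *sdev;
		unsigned long flags;
		int err = SCSI_DH_OK;

		spin_lock_irqsave(&pg->lock, flags);
		sdev = pg->rtpg_sdev;
		if (!sdev) {
			/* spurious run: drop the queueing's reference */
			spin_unlock_irqrestore(&pg->lock, flags);
			kref_put(&pg->kref, release_port_group);
			return;
		}
		pg->flags |= ALUA_PG_RUNNING;
		if (pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags &= ~ALUA_PG_RUN_RTPG;
			spin_unlock_irqrestore(&pg->lock, flags);
			err = alua_rtpg(sdev, pg); /* slow path, lock dropped */
			spin_lock_irqsave(&pg->lock, flags);
			if (err == SCSI_DH_RETRY ||
			    (pg->flags & ALUA_PG_RUN_RTPG)) {
				pg->flags &= ~ALUA_PG_RUNNING;
				pg->flags |= ALUA_PG_RUN_RTPG;
				spin_unlock_irqrestore(&pg->lock, flags);
				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
						   pg->interval * HZ);
				return;	/* keep the kref for the requeue */
			}
		}
		/* STPG leg and request completion elided */
		pg->flags &= ~ALUA_PG_RUNNING;
		pg->rtpg_sdev = NULL;
		spin_unlock_irqrestore(&pg->lock, flags);
		kref_put(&pg->kref, release_port_group);
	}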
973 static bool alua_rtpg_queue(struct alua_port_group *pg, in alua_rtpg_queue() argument
979 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) in alua_rtpg_queue()
982 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_queue()
984 list_add_tail(&qdata->entry, &pg->rtpg_list); in alua_rtpg_queue()
985 pg->flags |= ALUA_PG_RUN_STPG; in alua_rtpg_queue()
988 if (pg->rtpg_sdev == NULL) { in alua_rtpg_queue()
989 pg->interval = 0; in alua_rtpg_queue()
990 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_queue()
991 kref_get(&pg->kref); in alua_rtpg_queue()
992 pg->rtpg_sdev = sdev; in alua_rtpg_queue()
994 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { in alua_rtpg_queue()
995 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_queue()
997 if (!(pg->flags & ALUA_PG_RUNNING)) { in alua_rtpg_queue()
998 kref_get(&pg->kref); in alua_rtpg_queue()
1003 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_queue()
1006 if (queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_queue()
1010 kref_put(&pg->kref, release_port_group); in alua_rtpg_queue()
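
Queueing maintains the invariant visible throughout these matches: the port group holds one kref for as long as rtpg_work is pending or running. The first caller to designate rtpg_sdev takes that reference; if queue_delayed_work() then reports the work already pending, the surplus reference is dropped straight back. A simplified sketch (hypothetical name; qdata and scsi_device_get() handling elided, and the 5 ms initial delay is an assumption mirroring the driver's constant).

	static bool alua_rtpg_queue_sketch(struct alua_port_group *pg,
					   struct scsi_device *sdev, bool force)
	{
		unsigned long flags;
		bool start_queue = false;

		if (WARN_ON_ONCE(!pg))
			return false;

		spin_lock_irqsave(&pg->lock, flags);
		if (pg->rtpg_sdev == NULL) {
			pg->interval = 0;
			pg->flags |= ALUA_PG_RUN_RTPG;
			kref_get(&pg->kref);	/* reference owned by the work */
			pg->rtpg_sdev = sdev;
			start_queue = true;
		} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
			pg->flags |= ALUA_PG_RUN_RTPG;
			if (!(pg->flags & ALUA_PG_RUNNING)) {
				kref_get(&pg->kref);
				start_queue = true;
			}
		}
		spin_unlock_irqrestore(&pg->lock, flags);

		if (start_queue &&
		    !queue_delayed_work(kaluad_wq, &pg->rtpg_work,
					msecs_to_jiffies(5)))
			kref_put(&pg->kref, release_port_group); /* was queued */
		return true;
	}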
1050 struct alua_port_group *pg = NULL; in alua_set_params() local
1065 pg = rcu_dereference(h->pg); in alua_set_params()
1066 if (!pg) { in alua_set_params()
1070 spin_lock_irqsave(&pg->lock, flags); in alua_set_params()
1072 pg->flags |= ALUA_OPTIMIZE_STPG; in alua_set_params()
1074 pg->flags &= ~ALUA_OPTIMIZE_STPG; in alua_set_params()
1075 spin_unlock_irqrestore(&pg->lock, flags); in alua_set_params()
1097 struct alua_port_group *pg; in alua_activate() local
1109 pg = rcu_dereference(h->pg); in alua_activate()
1110 if (!pg || !kref_get_unless_zero(&pg->kref)) { in alua_activate()
1120 if (alua_rtpg_queue(pg, sdev, qdata, true)) { in alua_activate()
1126 kref_put(&pg->kref, release_port_group); in alua_activate()
1142 struct alua_port_group *pg; in alua_check() local
1145 pg = rcu_dereference(h->pg); in alua_check()
1146 if (!pg || !kref_get_unless_zero(&pg->kref)) { in alua_check()
1151 alua_rtpg_queue(pg, sdev, NULL, force); in alua_check()
1152 kref_put(&pg->kref, release_port_group); in alua_check()
1164 struct alua_port_group *pg; in alua_prep_fn() local
1168 pg = rcu_dereference(h->pg); in alua_prep_fn()
1169 if (pg) in alua_prep_fn()
1170 state = pg->state; in alua_prep_fn()
1205 rcu_assign_pointer(h->pg, NULL); in alua_bus_attach()
1229 struct alua_port_group *pg; in alua_bus_detach() local
1232 pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); in alua_bus_detach()
1233 rcu_assign_pointer(h->pg, NULL); in alua_bus_detach()
1235 if (pg) { in alua_bus_detach()
1236 spin_lock_irq(&pg->lock); in alua_bus_detach()
1238 spin_unlock_irq(&pg->lock); in alua_bus_detach()
1239 kref_put(&pg->kref, release_port_group); in alua_bus_detach()
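
Detach mirrors the publish path in reverse: clear the RCU pointer first so new readers see NULL, unlink from the group's device list, then drop the device's reference, which may trigger release_port_group(). The list_del_rcu() is an assumption (that line matches no "pg"); the helper name is hypothetical.

	static void alua_bus_detach_sketch(struct alua_dh_data *h)
	{
		struct alua_port_group *pg;

		spin_lock(&h->pg_lock);
		pg = rcu_dereference_protected(h->pg,
					       lockdep_is_held(&h->pg_lock));
		rcu_assign_pointer(h->pg, NULL);	/* new readers see NULL */
		spin_unlock(&h->pg_lock);

		if (pg) {
			spin_lock_irq(&pg->lock);
			list_del_rcu(&h->node);		/* leave the group's list */
			spin_unlock_irq(&pg->lock);
			kref_put(&pg->kref, release_port_group);
		}
	}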