Lines matching refs:pg — all references to the ALUA port group pointer pg in the SCSI ALUA device handler (drivers/scsi/device_handler/scsi_dh_alua.c); the leading number on each line is the source line in that file.

85 	struct alua_port_group __rcu *pg;
104 static bool alua_rtpg_queue(struct alua_port_group *pg,
111 struct alua_port_group *pg;
113 pg = container_of(kref, struct alua_port_group, kref);
114 if (pg->rtpg_sdev)
115 flush_delayed_work(&pg->rtpg_work);
117 list_del(&pg->node);
119 kfree_rcu(pg, rcu);
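
The fragment above (release_port_group) is a standard kref release callback: flush any still-running worker, unlink the group, and defer the actual free past outstanding RCU readers with kfree_rcu(). A minimal sketch of the same pattern, with hypothetical pg_like names standing in for struct alua_port_group (later sketches reuse these types):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Cut-down, hypothetical stand-in for struct alua_port_group. */
struct pg_like {
	struct kref		kref;		/* lifetime */
	struct list_head	node;		/* on the global group list */
	struct list_head	dh_list;	/* attached device handles */
	spinlock_t		lock;		/* protects flags and dh_list */
	unsigned int		flags;		/* run-state bits, used later */
	int			id;
	struct rcu_head		rcu;		/* deferred free */
};

static LIST_HEAD(pg_list);
static DEFINE_SPINLOCK(pg_list_lock);

/* kref release: unlink the group, free it only after RCU readers finish. */
static void pg_like_release(struct kref *kref)
{
	struct pg_like *p = container_of(kref, struct pg_like, kref);

	spin_lock(&pg_list_lock);
	list_del(&p->node);
	spin_unlock(&pg_list_lock);
	kfree_rcu(p, rcu);
}
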
188 struct alua_port_group *pg;
193 list_for_each_entry(pg, &port_group_list, node) {
194 if (pg->group_id != group_id)
196 if (!pg->device_id_len || pg->device_id_len != id_size)
198 if (strncmp(pg->device_id_str, id_str, id_size))
200 if (!kref_get_unless_zero(&pg->kref))
202 return pg;
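
alua_find_get_pg() pairs the list walk with kref_get_unless_zero(), so a group whose last reference is already gone is skipped rather than resurrected. The same idiom, sketched against the pg_like types above:

/* Lookup with the kref_get_unless_zero() idiom; caller gets a counted ref. */
static struct pg_like *pg_like_find_get(int id)
{
	struct pg_like *p;

	spin_lock(&pg_list_lock);
	list_for_each_entry(p, &pg_list, node) {
		if (p->id != id)
			continue;
		/* Zero refcount means release is underway: skip, don't revive. */
		if (!kref_get_unless_zero(&p->kref))
			continue;
		spin_unlock(&pg_list_lock);
		return p;
	}
	spin_unlock(&pg_list_lock);
	return NULL;
}
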
220 struct alua_port_group *pg, *tmp_pg;
222 pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
223 if (!pg)
226 pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
227 sizeof(pg->device_id_str));
228 if (pg->device_id_len <= 0) {
236 pg->device_id_str[0] = '\0';
237 pg->device_id_len = 0;
239 pg->group_id = group_id;
240 pg->tpgs = tpgs;
241 pg->state = SCSI_ACCESS_STATE_OPTIMAL;
242 pg->valid_states = TPGS_SUPPORT_ALL;
244 pg->flags |= ALUA_OPTIMIZE_STPG;
245 kref_init(&pg->kref);
246 INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
247 INIT_LIST_HEAD(&pg->rtpg_list);
248 INIT_LIST_HEAD(&pg->node);
249 INIT_LIST_HEAD(&pg->dh_list);
250 spin_lock_init(&pg->lock);
253 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
257 kfree(pg);
261 list_add(&pg->node, &port_group_list);
264 return pg;
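
alua_alloc_pg() allocates and initializes outside any lock, then re-checks the global list; if a concurrent caller already registered an equivalent group, the fresh allocation is discarded in favor of the existing one. A sketch of that alloc-then-dedupe shape (hypothetical names; a never-published allocation can use plain kfree()):

#include <linux/err.h>

/* Allocate outside the lock, dedupe under it (pg_like types above). */
static struct pg_like *pg_like_get_or_alloc(int id)
{
	struct pg_like *p, *tmp;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);
	p->id = id;
	kref_init(&p->kref);			/* count starts at 1 */
	INIT_LIST_HEAD(&p->node);
	INIT_LIST_HEAD(&p->dh_list);
	spin_lock_init(&p->lock);

	spin_lock(&pg_list_lock);
	list_for_each_entry(tmp, &pg_list, node) {
		if (tmp->id == id && kref_get_unless_zero(&tmp->kref)) {
			spin_unlock(&pg_list_lock);
			kfree(p);	/* lost the race: reuse the existing group */
			return tmp;
		}
	}
	list_add(&p->node, &pg_list);
	spin_unlock(&pg_list_lock);
	return p;
}
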
330 struct alua_port_group *pg, *old_pg = NULL;
347 pg = alua_alloc_pg(sdev, group_id, tpgs);
348 if (IS_ERR(pg)) {
349 if (PTR_ERR(pg) == -ENOMEM)
353 if (pg->device_id_len)
356 ALUA_DH_NAME, pg->device_id_str,
363 kref_get(&pg->kref);
367 old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
368 if (old_pg != pg) {
370 if (h->pg) {
375 rcu_assign_pointer(h->pg, pg);
379 spin_lock_irqsave(&pg->lock, flags);
381 list_add_rcu(&h->node, &pg->dh_list);
382 spin_unlock_irqrestore(&pg->lock, flags);
386 alua_rtpg_queue(pg, sdev, NULL, true);
387 kref_put(&pg->kref, release_port_group);
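
Attaching a device swaps h->pg under h->pg_lock: the old value is read with rcu_dereference_protected() (writers hold the lock), the new pointer is published with rcu_assign_pointer(), and the device migrates between the groups' dh_list under each group's lock. A condensed sketch, reusing pg_like and adding a hypothetical per-device handle:

#include <linux/rculist.h>

/* Hypothetical per-device handle, mirroring struct alua_dh_data. */
struct h_like {
	struct pg_like __rcu	*pg;		/* RCU-visible group pointer */
	spinlock_t		pg_lock;	/* serializes writers of ->pg */
	struct list_head	node;		/* on pg->dh_list */
};

/* Caller passes in a counted reference to new_pg, which ->pg absorbs. */
static void h_like_attach(struct h_like *h, struct pg_like *new_pg)
{
	struct pg_like *old_pg;
	unsigned long flags;

	spin_lock(&h->pg_lock);
	old_pg = rcu_dereference_protected(h->pg,
					   lockdep_is_held(&h->pg_lock));
	if (old_pg != new_pg) {
		if (old_pg) {
			spin_lock_irqsave(&old_pg->lock, flags);
			list_del_rcu(&h->node);		/* leave the old group */
			spin_unlock_irqrestore(&old_pg->lock, flags);
			kref_put(&old_pg->kref, pg_like_release);
		}
		rcu_assign_pointer(h->pg, new_pg);	/* publish to readers */
		spin_lock_irqsave(&new_pg->lock, flags);
		list_add_rcu(&h->node, &new_pg->dh_list);
		spin_unlock_irqrestore(&new_pg->lock, flags);
	} else {
		kref_put(&new_pg->kref, pg_like_release); /* already attached */
	}
	spin_unlock(&h->pg_lock);
}
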
421 struct alua_port_group *pg;
430 pg = rcu_dereference(h->pg);
431 if (pg)
432 pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
522 static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
536 group_id_old = pg->group_id;
537 state_old = pg->state;
538 pref_old = pg->pref;
539 valid_states_old = pg->valid_states;
541 if (!pg->expiry) {
544 if (pg->transition_tmo)
545 transition_tmo = pg->transition_tmo * HZ;
547 pg->expiry = round_jiffies_up(jiffies + transition_tmo);
556 retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
568 if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
597 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
599 pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
618 pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
629 pg->expiry = 0;
644 pg->expiry = 0;
650 orig_transition_tmo = pg->transition_tmo;
652 pg->transition_tmo = buff[5];
654 pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;
656 if (orig_transition_tmo != pg->transition_tmo) {
659 ALUA_DH_NAME, pg->transition_tmo);
660 pg->expiry = jiffies + pg->transition_tmo * HZ;
674 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
679 if ((tmp_pg == pg) ||
694 if (tmp_pg == pg)
704 spin_lock_irqsave(&pg->lock, flags);
706 pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
708 if (group_id_old != pg->group_id || state_old != pg->state ||
709 pref_old != pg->pref || valid_states_old != pg->valid_states)
712 ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
713 pg->pref ? "preferred" : "non-preferred",
714 pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
715 pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
716 pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
717 pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
718 pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
719 pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
720 pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
722 switch (pg->state) {
724 if (time_before(jiffies, pg->expiry)) {
726 pg->interval = ALUA_RTPG_RETRY_DELAY;
733 pg->state = SCSI_ACCESS_STATE_STANDBY;
734 pg->expiry = 0;
736 list_for_each_entry_rcu(h, &pg->dh_list, node) {
740 (pg->state & SCSI_ACCESS_STATE_MASK);
741 if (pg->pref)
751 pg->expiry = 0;
756 pg->expiry = 0;
759 spin_unlock_irqrestore(&pg->lock, flags);
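
Within alua_rtpg(), byte 5 of the RTPG response supplies the implicit transition timeout (see pg->transition_tmo = buff[5] above); a zero value falls back to the driver's failover default before the expiry deadline is rounded up into jiffies. A sketch of that computation, assuming a 60-second default (cf. ALUA_FAILOVER_TIMEOUT):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define FAILOVER_TMO_SECS	60	/* assumed default */

/* Deadline for a transitioning group, from an RTPG response buffer. */
static unsigned long rtpg_expiry(const unsigned char *buff)
{
	unsigned int tmo_secs = buff[5];	/* implicit transition timeout */

	if (!tmo_secs)
		tmo_secs = FAILOVER_TMO_SECS;	/* target reported none */
	return round_jiffies_up(jiffies + tmo_secs * HZ);
}
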
772 static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
777 if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
781 switch (pg->state) {
785 if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
786 !pg->pref &&
787 (pg->tpgs & TPGS_MODE_IMPLICIT))
800 ALUA_DH_NAME, pg->state);
803 retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
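
alua_stpg() declines to send an explicit SET TARGET PORT GROUPS when the group only supports implicit transitions, is already optimal, or is an active/non-optimized, non-preferred group that ALUA_OPTIMIZE_STPG lets the target move on its own. That decision, sketched as a predicate over the driver's TPGS_*/ALUA_* constants:

/* Predicate form of the skip logic above (a sketch, not the driver's code). */
static bool stpg_needed(unsigned int flags, bool pref, int tpgs, int state)
{
	if (!(tpgs & TPGS_MODE_EXPLICIT))
		return false;		/* no explicit transitions at all */
	if (state == SCSI_ACCESS_STATE_OPTIMAL)
		return false;		/* nothing to do */
	/* Non-preferred active group the target can move implicitly: leave it. */
	if (state == SCSI_ACCESS_STATE_ACTIVE &&
	    (flags & ALUA_OPTIMIZE_STPG) && !pref &&
	    (tpgs & TPGS_MODE_IMPLICIT))
		return false;
	return true;
}
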
827 alua_rtpg_select_sdev(struct alua_port_group *pg)
832 lockdep_assert_held(&pg->lock);
833 if (WARN_ON(!pg->rtpg_sdev))
838 * as we hold pg->lock, but for access to h->pg.
841 list_for_each_entry_rcu(h, &pg->dh_list, node) {
844 if (h->sdev == pg->rtpg_sdev) {
848 if (rcu_dereference(h->pg) == pg &&
859 (pg->device_id_len ?
860 (char *)pg->device_id_str : "(nameless PG)"));
866 prev_sdev = pg->rtpg_sdev;
867 pg->rtpg_sdev = sdev;
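
When RTPG keeps failing on the nominated device, alua_rtpg_select_sdev() scans pg->dh_list for another device that still maps back to this group and swaps it into pg->rtpg_sdev. A sketch of the selection walk (pg->lock held, RCU for the h->pg check; h_like types from above):

/* Pick another attached handle for the next RTPG attempt. */
static struct h_like *select_next_h(struct pg_like *pg, struct h_like *cur)
{
	struct h_like *h, *next = NULL;

	lockdep_assert_held(&pg->lock);
	rcu_read_lock();
	list_for_each_entry_rcu(h, &pg->dh_list, node) {
		if (h == cur)
			continue;
		/* Only a device still mapped to this group is usable. */
		if (rcu_dereference(h->pg) == pg) {
			next = h;
			break;
		}
	}
	rcu_read_unlock();
	return next;
}
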
874 struct alua_port_group *pg =
883 spin_lock_irqsave(&pg->lock, flags);
884 sdev = pg->rtpg_sdev;
886 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
887 WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
888 spin_unlock_irqrestore(&pg->lock, flags);
889 kref_put(&pg->kref, release_port_group);
892 pg->flags |= ALUA_PG_RUNNING;
893 if (pg->flags & ALUA_PG_RUN_RTPG) {
894 int state = pg->state;
896 pg->flags &= ~ALUA_PG_RUN_RTPG;
897 spin_unlock_irqrestore(&pg->lock, flags);
900 spin_lock_irqsave(&pg->lock, flags);
901 pg->flags &= ~ALUA_PG_RUNNING;
902 pg->flags |= ALUA_PG_RUN_RTPG;
903 if (!pg->interval)
904 pg->interval = ALUA_RTPG_RETRY_DELAY;
905 spin_unlock_irqrestore(&pg->lock, flags);
906 queue_delayed_work(kaluad_wq, &pg->rtpg_work,
907 pg->interval * HZ);
912 err = alua_rtpg(sdev, pg);
913 spin_lock_irqsave(&pg->lock, flags);
917 (prev_sdev = alua_rtpg_select_sdev(pg)))
921 pg->flags & ALUA_PG_RUN_RTPG) {
922 pg->flags &= ~ALUA_PG_RUNNING;
924 pg->interval = 0;
925 else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
926 pg->interval = ALUA_RTPG_RETRY_DELAY;
927 pg->flags |= ALUA_PG_RUN_RTPG;
928 spin_unlock_irqrestore(&pg->lock, flags);
932 pg->flags &= ~ALUA_PG_RUN_STPG;
934 if (pg->flags & ALUA_PG_RUN_STPG) {
935 pg->flags &= ~ALUA_PG_RUN_STPG;
936 spin_unlock_irqrestore(&pg->lock, flags);
937 err = alua_stpg(sdev, pg);
938 spin_lock_irqsave(&pg->lock, flags);
939 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
940 pg->flags |= ALUA_PG_RUN_RTPG;
941 pg->interval = 0;
942 pg->flags &= ~ALUA_PG_RUNNING;
943 spin_unlock_irqrestore(&pg->lock, flags);
948 list_splice_init(&pg->rtpg_list, &qdata_list);
953 list_for_each_entry(h, &pg->dh_list, node)
955 pg->rtpg_sdev = NULL;
956 spin_unlock_irqrestore(&pg->lock, flags);
967 spin_lock_irqsave(&pg->lock, flags);
968 pg->flags &= ~ALUA_PG_RUNNING;
969 spin_unlock_irqrestore(&pg->lock, flags);
971 kref_put(&pg->kref, release_port_group);
977 queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ);
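
alua_rtpg_work() only touches the ALUA_PG_RUN_*/ALUA_PG_RUNNING flags under pg->lock, drops the lock around the actual RTPG/STPG commands, and re-arms itself with queue_delayed_work() while the kref it holds keeps the group alive. The re-arm idiom, sketched with hypothetical flag bits on the pg_like type:

#include <linux/workqueue.h>

#define PG_RUN_RTPG	0x01	/* hypothetical stand-ins for ALUA_PG_* */
#define PG_RUNNING	0x02

/* Re-arm the worker: flip flags under the lock, queue outside it. */
static void pg_like_rearm(struct pg_like *p, struct delayed_work *dwork,
			  struct workqueue_struct *wq, unsigned int secs)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	p->flags &= ~PG_RUNNING;	/* this pass is over... */
	p->flags |= PG_RUN_RTPG;	/* ...but another RTPG is owed */
	spin_unlock_irqrestore(&p->lock, flags);
	/* The kref the worker already holds travels with the new work item. */
	queue_delayed_work(wq, dwork, secs * HZ);
}
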
982 * @pg: ALUA port group associated with @sdev.
994 static bool alua_rtpg_queue(struct alua_port_group *pg,
1001 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
1004 spin_lock_irqsave(&pg->lock, flags);
1006 list_add_tail(&qdata->entry, &pg->rtpg_list);
1007 pg->flags |= ALUA_PG_RUN_STPG;
1010 if (pg->rtpg_sdev == NULL) {
1014 if (h && rcu_dereference(h->pg) == pg) {
1015 pg->interval = 0;
1016 pg->flags |= ALUA_PG_RUN_RTPG;
1017 kref_get(&pg->kref);
1018 pg->rtpg_sdev = sdev;
1022 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
1023 pg->flags |= ALUA_PG_RUN_RTPG;
1025 if (!(pg->flags & ALUA_PG_RUNNING)) {
1026 kref_get(&pg->kref);
1031 spin_unlock_irqrestore(&pg->lock, flags);
1034 if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
1038 kref_put(&pg->kref, release_port_group);
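
alua_rtpg_queue() takes a reference before starting the worker and drops it again if queue_delayed_work() reports the work was already pending, so the reference count always matches the number of queued activations. A sketch of that pairing (flag bits and delay are assumptions; the driver uses ALUA_RTPG_DELAY_MSECS):

/* Kick the worker, pairing the queued work with a group reference. */
static void pg_like_kick(struct pg_like *p, struct delayed_work *dwork,
			 struct workqueue_struct *wq)
{
	bool start_queue = false;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	p->flags |= PG_RUN_RTPG;
	if (!(p->flags & PG_RUNNING)) {
		kref_get(&p->kref);	/* ref travels with the work item */
		start_queue = true;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	if (start_queue &&
	    !queue_delayed_work(wq, dwork, msecs_to_jiffies(100)))
		kref_put(&p->kref, pg_like_release);	/* already pending */
}
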
1078 struct alua_port_group *pg = NULL;
1093 pg = rcu_dereference(h->pg);
1094 if (!pg) {
1098 spin_lock_irqsave(&pg->lock, flags);
1100 pg->flags |= ALUA_OPTIMIZE_STPG;
1102 pg->flags &= ~ALUA_OPTIMIZE_STPG;
1103 spin_unlock_irqrestore(&pg->lock, flags);
1125 struct alua_port_group *pg;
1137 pg = rcu_dereference(h->pg);
1138 if (!pg || !kref_get_unless_zero(&pg->kref)) {
1148 if (alua_rtpg_queue(pg, sdev, qdata, true)) {
1154 kref_put(&pg->kref, release_port_group);
1170 struct alua_port_group *pg;
1173 pg = rcu_dereference(h->pg);
1174 if (!pg || !kref_get_unless_zero(&pg->kref)) {
1179 alua_rtpg_queue(pg, sdev, NULL, force);
1180 kref_put(&pg->kref, release_port_group);
1192 struct alua_port_group *pg;
1196 pg = rcu_dereference(h->pg);
1197 if (pg)
1198 state = pg->state;
1233 rcu_assign_pointer(h->pg, NULL);
1257 struct alua_port_group *pg;
1260 pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
1261 rcu_assign_pointer(h->pg, NULL);
1263 if (pg) {
1264 spin_lock_irq(&pg->lock);
1266 spin_unlock_irq(&pg->lock);
1267 kref_put(&pg->kref, release_port_group);
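
Teardown in alua_bus_detach() reverses the attach step: clear h->pg under h->pg_lock, unlink from dh_list under pg->lock, then kref_put(), which funnels the final reference into release_port_group(). Sketched against the earlier types:

/* Detach a handle and drop its group reference (pg_like/h_like from above). */
static void h_like_detach(struct h_like *h)
{
	struct pg_like *pg;

	spin_lock(&h->pg_lock);
	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	rcu_assign_pointer(h->pg, NULL);
	spin_unlock(&h->pg_lock);
	if (pg) {
		spin_lock_irq(&pg->lock);
		list_del_rcu(&h->node);		/* off the group's dh_list */
		spin_unlock_irq(&pg->lock);
		kref_put(&pg->kref, pg_like_release);	/* possibly the last */
	}
}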