Lines matching refs:rport in drivers/scsi/scsi_transport_srp.c

124 struct srp_rport *rport = transport_class_to_srp_rport(dev);
125 return sprintf(buf, "%16phC\n", rport->port_id);
142 struct srp_rport *rport = transport_class_to_srp_rport(dev);
147 if (srp_rport_role_names[i].value == rport->roles) {
160 struct srp_rport *rport = transport_class_to_srp_rport(dev);
165 i->f->rport_delete(rport);
184 struct srp_rport *rport = transport_class_to_srp_rport(dev);
185 enum srp_rport_state state = rport->state;
215 struct srp_rport *rport = transport_class_to_srp_rport(dev);
217 return srp_show_tmo(buf, rport->reconnect_delay);
224 struct srp_rport *rport = transport_class_to_srp_rport(dev);
230 res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
231 rport->dev_loss_tmo);
235 if (rport->reconnect_delay <= 0 && delay > 0 &&
236 rport->state != SRP_RPORT_RUNNING) {
237 queue_delayed_work(system_long_wq, &rport->reconnect_work,
240 cancel_delayed_work(&rport->reconnect_work);
242 rport->reconnect_delay = delay;
255 struct srp_rport *rport = transport_class_to_srp_rport(dev);
257 return sprintf(buf, "%d\n", rport->failed_reconnects);
266 struct srp_rport *rport = transport_class_to_srp_rport(dev);
268 return srp_show_tmo(buf, rport->fast_io_fail_tmo);
275 struct srp_rport *rport = transport_class_to_srp_rport(dev);
282 res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
283 rport->dev_loss_tmo);
286 rport->fast_io_fail_tmo = fast_io_fail_tmo;
301 struct srp_rport *rport = transport_class_to_srp_rport(dev);
303 return srp_show_tmo(buf, rport->dev_loss_tmo);
310 struct srp_rport *rport = transport_class_to_srp_rport(dev);
317 res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
321 rport->dev_loss_tmo = dev_loss_tmo;
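The three attributes above (reconnect_delay, fast_io_fail_tmo and dev_loss_tmo) are validated together by srp_tmo_valid() and take their defaults from the optional int pointers in struct srp_function_template, which srp_rport_add() dereferences at lines 718-723 below. A minimal sketch of how a hypothetical LLD ("my_srp") might wire its own defaults into that template and attach the transport class; the my_* names are assumptions, only the template members and srp_attach_transport() come from scsi_transport_srp.h:

#include <linux/module.h>
#include <scsi/scsi_transport_srp.h>

/* Illustrative defaults; real drivers such as ib_srp expose these as module parameters. */
static int my_reconnect_delay = 10;	/* seconds between reconnect attempts */
static int my_fast_io_fail_tmo = 15;	/* seconds until blocked I/O is failed */
static int my_dev_loss_tmo = 600;	/* seconds until the rport is given up on */

/* LLD callbacks; sketched further down in this listing's commentary. */
static int my_rport_reconnect(struct srp_rport *rport);
static void my_terminate_io(struct srp_rport *rport);
static void my_rport_delete(struct srp_rport *rport);

static struct srp_function_template my_srp_transport_functions = {
	.has_rport_state	= true,
	.reset_timer_if_blocked	= true,
	.reconnect_delay	= &my_reconnect_delay,
	.fast_io_fail_tmo	= &my_fast_io_fail_tmo,
	.dev_loss_tmo		= &my_dev_loss_tmo,
	.reconnect		= my_rport_reconnect,
	.terminate_rport_io	= my_terminate_io,
	.rport_delete		= my_rport_delete,
};

static struct scsi_transport_template *my_srp_template;

/* Typically called from the LLD's module_init(). */
static int my_attach_transport(void)
{
	my_srp_template = srp_attach_transport(&my_srp_transport_functions);
	return my_srp_template ? 0 : -ENOMEM;
}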
332 static int srp_rport_set_state(struct srp_rport *rport,
335 enum srp_rport_state old_state = rport->state;
337 lockdep_assert_held(&rport->mutex);
367 rport->state = new_state;
380 struct srp_rport *rport = container_of(to_delayed_work(work),
382 struct Scsi_Host *shost = rport_to_shost(rport);
385 res = srp_reconnect_rport(rport);
389 ++rport->failed_reconnects, res);
390 delay = rport->reconnect_delay *
391 min(100, max(1, rport->failed_reconnects - 10));
394 &rport->reconnect_work, delay * HZ);
402 static void __rport_fail_io_fast(struct srp_rport *rport)
404 struct Scsi_Host *shost = rport_to_shost(rport);
407 lockdep_assert_held(&rport->mutex);
409 if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
412 scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
414 /* Involve the LLD if possible to terminate all I/O on the rport. */
417 i->f->terminate_rport_io(rport);
426 struct srp_rport *rport = container_of(to_delayed_work(work),
428 struct Scsi_Host *shost = rport_to_shost(rport);
431 dev_name(&rport->dev), dev_name(&shost->shost_gendev));
433 mutex_lock(&rport->mutex);
434 if (rport->state == SRP_RPORT_BLOCKED)
435 __rport_fail_io_fast(rport);
436 mutex_unlock(&rport->mutex);
445 struct srp_rport *rport = container_of(to_delayed_work(work),
447 struct Scsi_Host *shost = rport_to_shost(rport);
451 dev_name(&rport->dev), dev_name(&shost->shost_gendev));
453 mutex_lock(&rport->mutex);
454 WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
455 scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
456 mutex_unlock(&rport->mutex);
458 i->f->rport_delete(rport);
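The fast_io_fail and dev_loss timers reach the LLD through the terminate_rport_io and rport_delete callbacks (lines 417 and 458); rport_delete is also what the sysfs delete attribute invokes at line 165. A hedged sketch of what such callbacks could look like; struct my_target and the helper functions are invented for illustration, only the callback signatures and rport->lld_data come from the transport class:

/* Fast-I/O-fail fired: terminate everything still in flight on this rport. */
static void my_terminate_io(struct srp_rport *rport)
{
	struct my_target *target = rport->lld_data;	/* set when the rport was added */

	my_abort_all_outstanding_requests(target);	/* hypothetical helper */
}

/* Dev-loss fired (or "delete" written to sysfs): schedule removal of the target. */
static void my_rport_delete(struct srp_rport *rport)
{
	struct my_target *target = rport->lld_data;

	my_schedule_target_removal(target);		/* hypothetical helper */
}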
461 static void __srp_start_tl_fail_timers(struct srp_rport *rport)
463 struct Scsi_Host *shost = rport_to_shost(rport);
466 lockdep_assert_held(&rport->mutex);
468 delay = rport->reconnect_delay;
469 fast_io_fail_tmo = rport->fast_io_fail_tmo;
470 dev_loss_tmo = rport->dev_loss_tmo;
472 rport->state);
474 if (rport->state == SRP_RPORT_LOST)
477 queue_delayed_work(system_long_wq, &rport->reconnect_work,
480 srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
482 rport->state);
486 &rport->fast_io_fail_work,
490 &rport->dev_loss_work,
497 * @rport: SRP target port.
502 void srp_start_tl_fail_timers(struct srp_rport *rport)
504 mutex_lock(&rport->mutex);
505 __srp_start_tl_fail_timers(rport);
506 mutex_unlock(&rport->mutex);
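srp_start_tl_fail_timers() (line 502) is the hook an LLD calls when it detects that the transport layer has failed: under rport->mutex it queues the reconnect work and, via __srp_start_tl_fail_timers(), blocks the rport and arms the fast_io_fail/dev_loss delayed works. A sketch of a hypothetical asynchronous error handler using it; everything except the exported function is an assumption:

/* Hypothetical handler for an asynchronous transport error (e.g. a broken RDMA QP). */
static void my_handle_transport_error(struct my_target *target)
{
	pr_err("my_srp: transport failure on %s, starting TL failure timers\n",
	       dev_name(&target->rport->dev));
	srp_start_tl_fail_timers(target->rport);
}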
512 * @rport: SRP target port.
528 * lock the rport mutex inside each SCSI LLD callback that can be invoked by
532 int srp_reconnect_rport(struct srp_rport *rport)
534 struct Scsi_Host *shost = rport_to_shost(rport);
541 res = mutex_lock_interruptible(&rport->mutex);
544 if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
552 res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
554 dev_name(&shost->shost_gendev), rport->state, res);
556 cancel_delayed_work(&rport->fast_io_fail_work);
557 cancel_delayed_work(&rport->dev_loss_work);
559 rport->failed_reconnects = 0;
560 srp_rport_set_state(rport, SRP_RPORT_RUNNING);
573 } else if (rport->state == SRP_RPORT_RUNNING) {
579 __rport_fail_io_fast(rport);
580 __srp_start_tl_fail_timers(rport);
581 } else if (rport->state != SRP_RPORT_BLOCKED) {
585 mutex_unlock(&rport->mutex);
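The i->f->reconnect(rport) call at line 552 lands in the LLD's reconnect callback. If it returns 0, the transport class cancels the failure timers, resets failed_reconnects and moves the rport back to SRP_RPORT_RUNNING (lines 556-560); on failure, srp_reconnect_work() retries with a growing delay (lines 390-394). A hedged sketch of such a callback; my_target and the connect/disconnect helpers are assumptions:

static int my_rport_reconnect(struct srp_rport *rport)
{
	struct my_target *target = rport->lld_data;

	/*
	 * srp_reconnect_rport() has already blocked the SCSI target, so no new
	 * requests are issued while the connection is being re-established.
	 */
	my_disconnect(target);		/* hypothetical: tear down the failed connection */
	return my_connect(target);	/* hypothetical: 0 on success, negative errno otherwise */
}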
596 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
608 struct srp_rport *rport = shost_to_rport(shost);
611 return rport && rport->fast_io_fail_tmo < 0 &&
612 rport->dev_loss_tmo < 0 &&
620 struct srp_rport *rport = dev_to_rport(dev);
623 kfree(rport);
669 * srp_rport_get() - increment rport reference count
670 * @rport: SRP target port.
672 void srp_rport_get(struct srp_rport *rport)
674 get_device(&rport->dev);
679 * srp_rport_put() - decrement rport reference count
680 * @rport: SRP target port.
682 void srp_rport_put(struct srp_rport *rport)
684 put_device(&rport->dev);
698 struct srp_rport *rport;
703 rport = kzalloc(sizeof(*rport), GFP_KERNEL);
704 if (!rport)
707 mutex_init(&rport->mutex);
709 device_initialize(&rport->dev);
711 rport->dev.parent = get_device(parent);
712 rport->dev.release = srp_rport_release;
714 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
715 rport->roles = ids->roles;
718 rport->reconnect_delay = i->f->reconnect_delay ?
720 INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
721 rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
723 rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
724 INIT_DELAYED_WORK(&rport->fast_io_fail_work,
726 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
729 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
731 transport_setup_device(&rport->dev);
733 ret = device_add(&rport->dev);
735 transport_destroy_device(&rport->dev);
736 put_device(&rport->dev);
740 transport_add_device(&rport->dev);
741 transport_configure_device(&rport->dev);
743 return rport;
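srp_rport_add() (lines 698-743) is what an LLD calls after scsi_add_host() to create the rport, arm its default timeouts and register the sysfs node whose attributes appear at the top of this listing. A sketch, building on the my_target type assumed in the earlier sketches and a hypothetical 16-byte port identifier; only srp_rport_add(), struct srp_rport_identifiers, SRP_RPORT_ROLE_TARGET and rport->lld_data come from the transport class:

static int my_add_rport(struct my_target *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	memcpy(ids.port_id, target->port_id, sizeof(ids.port_id));	/* 16 bytes, cf. line 714 */
	ids.roles = SRP_RPORT_ROLE_TARGET;

	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport))
		return PTR_ERR(rport);

	rport->lld_data = target;	/* lets the callbacks above find the LLD's state */
	target->rport = rport;
	return 0;
}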
749 * @rport: SRP remote port to remove
753 void srp_rport_del(struct srp_rport *rport)
755 struct device *dev = &rport->dev;
787 * @rport: SRP remote port for which to stop the timers.
790 * must hold a reference on the rport (rport->dev) and on the SCSI host
791 * (rport->dev.parent).
793 void srp_stop_rport_timers(struct srp_rport *rport)
795 mutex_lock(&rport->mutex);
796 if (rport->state == SRP_RPORT_BLOCKED)
797 __rport_fail_io_fast(rport);
798 srp_rport_set_state(rport, SRP_RPORT_LOST);
799 mutex_unlock(&rport->mutex);
801 cancel_delayed_work_sync(&rport->reconnect_work);
802 cancel_delayed_work_sync(&rport->fast_io_fail_work);
803 cancel_delayed_work_sync(&rport->dev_loss_work);
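srp_stop_rport_timers() (lines 793-803) forces the rport into SRP_RPORT_LOST and synchronously cancels the reconnect, fast_io_fail and dev_loss works, which is why the comment at lines 790-791 requires the caller to hold references on the rport and on the SCSI host. A sketch of a removal path loosely modeled on how ib_srp tears a target down; the exact ordering and the my_* helpers are assumptions, while the srp_* and scsi_* calls are real exports:

static void my_remove_target(struct my_target *target)
{
	struct srp_rport *rport = target->rport;

	srp_rport_get(rport);			/* keep the rport alive across removal */
	srp_remove_host(target->scsi_host);	/* removes the host's SRP rport devices */
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(rport);		/* no more reconnect/fail-fast/dev-loss work */

	my_disconnect(target);			/* hypothetical: close the SRP connection */
	my_free_target_resources(target);	/* hypothetical */

	srp_rport_put(rport);
	scsi_host_put(target->scsi_host);
}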