Lines Matching refs:vs

175 struct vhost_scsi *vs;
252 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
259 for (i = 0; i < vs->dev.nvqs; i++) {
260 vq = &vs->vqs[i].vq;
265 idx = vs->vqs[i].inflight_idx;
267 old_inflight[i] = &vs->vqs[i].inflights[idx];
270 vs->vqs[i].inflight_idx = idx ^ 1;
271 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
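The hits from lines 252-271 show the double-buffered inflight tracking: each virtqueue keeps two counters and vhost_scsi_init_inflight flips inflight_idx with idx ^ 1, so new requests charge the fresh slot while the old one drains. A minimal userspace sketch of that flip, assuming a plain integer in place of the kref/completion pair the driver actually uses (struct fake_vq and flip_inflight are invented names):

#include <stdio.h>

/* Illustrative analogue of the two inflight slots per virtqueue. */
struct fake_vq {
	int inflights[2];   /* double-buffered in-flight request counters */
	int inflight_idx;   /* which slot new requests charge against     */
};

/* Start a new "generation": remember the old slot, switch to the other. */
static int *flip_inflight(struct fake_vq *vq)
{
	int idx = vq->inflight_idx;
	int *old = &vq->inflights[idx];

	vq->inflight_idx = idx ^ 1;        /* new requests use the other slot */
	vq->inflights[idx ^ 1] = 0;
	return old;                        /* caller waits for this to drain  */
}

int main(void)
{
	struct fake_vq vq = { .inflights = { 3, 0 }, .inflight_idx = 0 };
	int *old = flip_inflight(&vq);

	printf("old slot still has %d in-flight, new slot is %d\n",
	       *old, vq.inflights[vq.inflight_idx]);
	return 0;
}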
413 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
415 vs->vs_events_nr--;
420 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
423 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
426 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
427 vs->vs_events_missed = true;
434 vs->vs_events_missed = true;
440 vs->vs_events_nr++;
451 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
453 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
460 vs->vs_events_missed = true;
465 vhost_disable_notify(&vs->dev, vq);
470 vs->vs_events_missed = true;
474 if (vhost_enable_notify(&vs->dev, vq))
476 vs->vs_events_missed = true;
483 vs->vs_events_missed = true;
487 if (vs->vs_events_missed) {
489 vs->vs_events_missed = false;
495 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
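Lines 413-495 sketch the event path: vhost_scsi_allocate_evt refuses to queue more than VHOST_SCSI_MAX_EVENT events and sets vs_events_missed instead, and vhost_scsi_do_evt_work clears that flag once a delivered event can report the loss. A toy analogue of that sticky-flag pattern follows; MAX_EVENTS, allocate_event and deliver_event are illustrative names, and the reported bit merely stands in for the real "events missed" flag:

#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENTS 128          /* placeholder for the driver's cap */

static int events_nr;
static bool events_missed;

static bool allocate_event(void)
{
	if (events_nr > MAX_EVENTS) {
		events_missed = true;   /* drop it, but remember we did */
		return false;
	}
	events_nr++;
	return true;
}

static void deliver_event(unsigned *event)
{
	if (events_missed) {
		*event |= 1u;           /* stand-in for the "events missed" bit */
		events_missed = false;  /* reported once, then cleared          */
	}
	events_nr--;
}

int main(void)
{
	unsigned ev = 0;

	if (allocate_event())
		deliver_event(&ev);
	printf("event flags: %#x\n", ev);
	return 0;
}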
502 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
504 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
509 llnode = llist_del_all(&vs->vs_event_list);
511 vhost_scsi_do_evt_work(vs, evt);
512 vhost_scsi_free_evt(vs, evt);
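Lines 502-512 show the consumer side of the event list: vhost_scsi_evt_work detaches the whole vs_event_list with llist_del_all and then handles and frees each entry, while the producer side (lines 1511-1512) pushes with llist_add and queues the work. A self-contained userspace analogue of that detach-and-drain pattern, using C11 atomics in place of the kernel llist API (struct evt, evt_push and evt_del_all are invented names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct evt {
	struct evt *next;
	int event;
};

static _Atomic(struct evt *) event_list = NULL;

static void evt_push(struct evt *e)            /* roughly llist_add() */
{
	struct evt *head = atomic_load(&event_list);
	do {
		e->next = head;
	} while (!atomic_compare_exchange_weak(&event_list, &head, e));
}

static struct evt *evt_del_all(void)           /* roughly llist_del_all() */
{
	return atomic_exchange(&event_list, NULL);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct evt *e = malloc(sizeof(*e));
		e->event = i;
		evt_push(e);
	}

	/* Worker: detach once, then process and free each entry. */
	for (struct evt *e = evt_del_all(), *n; e; e = n) {
		n = e->next;
		printf("handled event %d\n", e->event);
		free(e);
	}
	return 0;
}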
591 vhost_signal(&svq->vs->dev, &svq->vq);
916 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
929 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
935 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
953 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
954 vhost_disable_notify(&vs->dev, vq);
1038 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1066 vhost_disable_notify(&vs->dev, vq);
1069 ret = vhost_scsi_get_desc(vs, vq, &vc);
1106 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1203 cmd->tvc_vhost = vs;
1241 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1248 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1264 vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1301 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1324 tmf->vhost = vs;
1342 vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1347 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1363 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1369 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1391 vhost_disable_notify(&vs->dev, vq);
1394 ret = vhost_scsi_get_desc(vs, vq, &vc);
1458 vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1460 vhost_scsi_send_an_resp(vs, vq, &vc);
1471 vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1481 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1484 vhost_scsi_ctl_handle_vq(vs, vq);
1488 vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1494 evt = vhost_scsi_allocate_evt(vs, event, reason);
1511 llist_add(&evt->list, &vs->vs_event_list);
1512 vhost_vq_work_queue(vq, &vs->vs_event_work);
1519 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1525 if (vs->vs_events_missed)
1526 vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
1536 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1538 vhost_scsi_handle_vq(vs, vq);
1542 static void vhost_scsi_flush(struct vhost_scsi *vs)
1547 vhost_scsi_init_inflight(vs, vs->old_inflight);
1554 for (i = 0; i < vs->dev.nvqs; i++)
1555 kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1558 vhost_dev_flush(&vs->dev);
1561 for (i = 0; i < vs->dev.nvqs; i++)
1562 wait_for_completion(&vs->old_inflight[i]->comp);
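Lines 1542-1562 give the flush sequence: start a new inflight generation, drop the initial reference on each old inflight, flush the vhost device, then wait for each old inflight's completion to fire when its last command finishes. A deliberately single-threaded sketch of that drop-initial-reference-then-wait idea, with a plain counter standing in for the kref and completion:

#include <stdio.h>

/* Each old inflight holds one "initial" reference; in-flight commands
 * hold the rest. The completion fires when the count reaches zero. */
struct inflight {
	int refs;
	int completed;
};

static void inflight_put(struct inflight *in)      /* roughly kref_put() */
{
	if (--in->refs == 0)
		in->completed = 1;                 /* roughly complete(&comp) */
}

int main(void)
{
	struct inflight old = { .refs = 1 + 2 };   /* initial ref + 2 cmds */

	inflight_put(&old);                        /* flush drops initial ref */
	inflight_put(&old);                        /* command 1 finishes      */
	inflight_put(&old);                        /* command 2 finishes      */

	printf("old inflight drained: %s\n", old.completed ? "yes" : "no");
	return 0;
}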
1656 * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
1659 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1670 mutex_lock(&vs->dev.mutex);
1673 for (index = 0; index < vs->dev.nvqs; ++index) {
1675 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1687 if (vs->vs_tpg)
1688 memcpy(vs_tpg, vs->vs_tpg, len);
1704 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1725 tpg->vhost_scsi = vs;
1734 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1735 sizeof(vs->vs_vhost_wwpn));
1737 for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
1738 vq = &vs->vqs[i].vq;
1747 for (i = 0; i < vs->dev.nvqs; i++) {
1748 vq = &vs->vqs[i].vq;
1761 * old vs->vs_tpg is finished.
1763 vhost_scsi_flush(vs);
1764 kfree(vs->vs_tpg);
1765 vs->vs_tpg = vs_tpg;
1770 if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1771 vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1786 mutex_unlock(&vs->dev.mutex);
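Lines 1659-1786 outline vhost_scsi_set_endpoint: it builds a new vs_tpg table as a copy of the current one, installs the new target, flushes so nothing still references the old table, then frees the old table and publishes the copy. A rough userspace sketch of that copy-flush-swap shape; struct scsi_state and set_targets are invented names for illustration only:

#include <stdlib.h>
#include <string.h>

struct scsi_state {
	void **vs_tpg;          /* published target table */
	size_t ntargets;
};

static int set_targets(struct scsi_state *s, size_t target, void *tpg)
{
	size_t len = s->ntargets * sizeof(void *);
	void **new_tpg = calloc(s->ntargets, sizeof(void *));
	if (!new_tpg)
		return -1;

	if (s->vs_tpg)
		memcpy(new_tpg, s->vs_tpg, len);     /* copy existing entries */
	new_tpg[target] = tpg;                       /* add the new target    */

	/* ...here the driver flushes so users of the old table finish... */

	free(s->vs_tpg);                             /* drop the old table    */
	s->vs_tpg = new_tpg;                         /* publish the new one   */
	return 0;
}

int main(void)
{
	struct scsi_state s = { .vs_tpg = NULL, .ntargets = 8 };

	s.vs_tpg = calloc(s.ntargets, sizeof(void *));
	set_targets(&s, 2, (void *)0x1);
	free(s.vs_tpg);
	return 0;
}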
1791 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1802 mutex_lock(&vs->dev.mutex);
1804 for (index = 0; index < vs->dev.nvqs; ++index) {
1805 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1811 if (!vs->vs_tpg) {
1818 tpg = vs->vs_tpg[target];
1842 for (i = 0; i < vs->dev.nvqs; i++) {
1843 vq = &vs->vqs[i].vq;
1849 vhost_scsi_flush(vs);
1851 for (i = 0; i < vs->dev.nvqs; i++) {
1852 vq = &vs->vqs[i].vq;
1862 tpg = vs->vs_tpg[target];
1870 vs->vs_tpg[target] = NULL;
1881 * old vs->vs_tpg is finished.
1883 vhost_scsi_flush(vs);
1884 kfree(vs->vs_tpg);
1885 vs->vs_tpg = NULL;
1886 WARN_ON(vs->vs_events_nr);
1887 mutex_unlock(&vs->dev.mutex);
1891 mutex_unlock(&vs->dev.mutex);
1895 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1903 mutex_lock(&vs->dev.mutex);
1905 !vhost_log_access_ok(&vs->dev)) {
1906 mutex_unlock(&vs->dev.mutex);
1910 for (i = 0; i < vs->dev.nvqs; i++) {
1911 vq = &vs->vqs[i].vq;
1916 mutex_unlock(&vs->dev.mutex);
1923 struct vhost_scsi *vs;
1927 vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1928 if (!vs)
1941 vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
1943 if (!vs->old_inflight)
1946 vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
1948 if (!vs->vqs)
1955 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1957 vs->vs_events_nr = 0;
1958 vs->vs_events_missed = false;
1960 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1961 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1962 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1963 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1965 svq = &vs->vqs[i];
1968 svq->vs = vs;
1974 vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
1977 vhost_scsi_init_inflight(vs, NULL);
1979 f->private_data = vs;
1983 kfree(vs->vqs);
1985 kfree(vs->old_inflight);
1987 kvfree(vs);
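Lines 1923-1987 show the allocation chain in vhost_scsi_open (allocate the device, then the old_inflight and vqs arrays) with error labels that unwind only what was already allocated, in reverse order. A compact sketch of that goto-based unwind, using libc allocators and placeholder fields in place of the driver's structures:

#include <stdlib.h>

struct dev_state {
	void *old_inflight;
	void *vqs;
};

static struct dev_state *dev_open(size_t nvqs)
{
	struct dev_state *s = calloc(1, sizeof(*s));
	if (!s)
		goto err;

	s->old_inflight = calloc(nvqs, sizeof(void *));
	if (!s->old_inflight)
		goto err_free_s;

	s->vqs = calloc(nvqs, sizeof(void *));
	if (!s->vqs)
		goto err_free_inflight;

	return s;

err_free_inflight:
	free(s->old_inflight);          /* undo only completed allocations */
err_free_s:
	free(s);
err:
	return NULL;
}

int main(void)
{
	struct dev_state *s = dev_open(4);

	if (s) {
		free(s->vqs);
		free(s->old_inflight);
		free(s);
	}
	return 0;
}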
1994 struct vhost_scsi *vs = f->private_data;
1997 mutex_lock(&vs->dev.mutex);
1998 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1999 mutex_unlock(&vs->dev.mutex);
2000 vhost_scsi_clear_endpoint(vs, &t);
2001 vhost_dev_stop(&vs->dev);
2002 vhost_dev_cleanup(&vs->dev);
2003 kfree(vs->dev.vqs);
2004 kfree(vs->vqs);
2005 kfree(vs->old_inflight);
2006 kvfree(vs);
2015 struct vhost_scsi *vs = f->private_data;
2023 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2032 return vhost_scsi_set_endpoint(vs, &backend);
2039 return vhost_scsi_clear_endpoint(vs, &backend);
2048 vs->vs_events_missed = events_missed;
2053 events_missed = vs->vs_events_missed;
2066 return vhost_scsi_set_features(vs, features);
2071 mutex_lock(&vs->dev.mutex);
2072 r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
2073 mutex_unlock(&vs->dev.mutex);
2076 mutex_lock(&vs->dev.mutex);
2077 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
2080 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
2081 mutex_unlock(&vs->dev.mutex);
2132 struct vhost_scsi *vs = tpg->vhost_scsi;
2136 if (!vs)
2144 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2154 vhost_scsi_send_evt(vs, vq, tpg, lun,