// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/blk_types.h>
#include <linux/bio.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
	u32 tvc_lun;
	u32 copied_iov:1;
	const void *saved_iter_addr;
	struct iov_iter saved_iter;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec *tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_IO_VQ	1024
#define VHOST_SCSI_MAX_EVENT	128

static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	struct vhost_scsi *vs;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any time, one reference tracks new commands submitted, while we
	 * wait for the other one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicates the inflight counter currently in use, protected by
	 * vq->mutex. Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;

	struct vhost_work completion_work;
	struct llist_head completion_list;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue *vqs;
	struct vhost_scsi_inflight **old_inflight;

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

/*
 * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
 * configfs management operations.
 */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

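/*
 * Flip each vq over to its unused inflight counter so that new commands are
 * tracked there, and optionally return the counters that were in use so the
 * caller can wait for their outstanding requests to drain.
 */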
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* set up new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
			if (tv_cmd->copied_iov)
				__free_page(sg_page(&tv_cmd->tvc_sgl[i]));
			else
				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
		}
		kfree(tv_cmd->saved_iter_addr);
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	kfree(tmf);
	vhost_scsi_put_inflight(inflight);
}

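/*
 * Called from TCM context when the last se_cmd reference is dropped. The
 * actual release is deferred to the vq's vhost worker: TMFs get their own
 * work item, while regular commands are batched onto the per-vq completion
 * list.
 */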
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);
		struct vhost_virtqueue *vq = &tmf->svq->vq;

		vhost_vq_work_queue(vq, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
					struct vhost_scsi_virtqueue, vq);

		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
		vhost_vq_work_queue(&svq->vq, &svq->completion_work);
	}
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

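/*
 * Copy a single event into a guest-supplied buffer on the event vq. If no
 * descriptor is available, or the backend is gone, record that we missed an
 * event; the VIRTIO_SCSI_T_EVENTS_MISSED flag is then piggybacked onto the
 * next event that does make it out.
 */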
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

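/*
 * Copy data for a misaligned READ from the locally allocated bounce pages
 * back into the guest's original iovecs, which were saved in
 * cmd->saved_iter when the bounce buffer was set up.
 */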
static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
	struct iov_iter *iter = &cmd->saved_iter;
	struct scatterlist *sg = cmd->tvc_sgl;
	struct page *page;
	size_t len;
	int i;

	for (i = 0; i < cmd->tvc_sgl_count; i++) {
		page = sg_page(&sg[i]);
		len = sg[i].length;

		if (copy_page_to_iter(page, 0, len, iter) != len) {
			pr_err("Could not copy %zu bytes while handling misaligned cmd\n",
			       len);
			return -1;
		}
	}

	return 0;
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi_virtqueue *svq = container_of(work,
				struct vhost_scsi_virtqueue, completion_work);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	bool signal = false;
	int ret;

	llnode = llist_del_all(&svq->completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);
		memset(&v_rsp, 0, sizeof(v_rsp));

		if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
		} else {
			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
						     se_cmd->residual_count);
			/* TODO is status_qualifier field needed? */
			v_rsp.status = se_cmd->scsi_status;
			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
							 se_cmd->scsi_sense_length);
			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
			       se_cmd->scsi_sense_length);
		}

		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			signal = true;

			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	if (signal)
		vhost_signal(&svq->vs->dev, &svq->vq);
}

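/*
 * Claim a tag from the per-vq sbitmap and initialize the matching
 * pre-allocated command descriptor. The descriptor is zeroed for reuse, so
 * the pre-allocated scatterlist, page, and iovec arrays are saved across
 * the memset and restored afterwards.
 */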
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct iovec *tvc_resp_iov;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	tvc_resp_iov = cmd->tvc_resp_iov;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);
	cmd->tvc_resp_iov = tvc_resp_iov;

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool is_prot)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes, mapped_bytes;
	size_t offset, mapped_offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	mapped_bytes = bytes;
	mapped_offset = offset;

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		/*
		 * The block layer requires bios/requests to be a multiple of
		 * 512 bytes, but Windows can send us vecs that are misaligned.
		 * This can result in bios and later requests with misaligned
		 * sizes if we have to break up a cmd/scatterlist into multiple
		 * bios.
		 *
		 * We currently only break up a command into multiple bios if
		 * we hit the vec/seg limit, so check if our sgl_count is
		 * greater than the max and if a vec in the cmd has a
		 * misaligned offset/size.
		 */
		if (!is_prot &&
		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
			WARN_ONCE(true,
				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
			goto revert_iter_get_pages;
		}

		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}

	return npages;

revert_iter_get_pages:
	iov_iter_revert(iter, mapped_bytes);

	npages = 0;
	while (mapped_bytes) {
		unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
				       mapped_bytes);

		put_page(pages[npages++]);

		mapped_bytes -= n;
		mapped_offset = 0;
	}

	return -EINVAL;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter_iov(iter)) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

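/*
 * Bounce-buffer path for misaligned commands: allocate fresh pages for the
 * scatterlist instead of pinning the guest's pages. For writes the payload
 * is copied in from the iovecs now; for reads the iter is duplicated so the
 * data can be copied back out at completion time.
 */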
static int
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			   struct scatterlist *sg, int sg_count)
{
	size_t len = iov_iter_count(iter);
	unsigned int nbytes = 0;
	struct page *page;
	int i;

	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
						GFP_KERNEL);
		if (!cmd->saved_iter_addr)
			return -ENOMEM;
	}

	for (i = 0; i < sg_count; i++) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			i--;
			goto err;
		}

		nbytes = min_t(unsigned int, PAGE_SIZE, len);
		sg_set_page(&sg[i], page, nbytes, 0);

		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
			goto err;

		len -= nbytes;
	}

	cmd->copied_iov = 1;
	return 0;

err:
	pr_err("Could not read %u bytes while handling misaligned cmd\n",
	       nbytes);

	for (; i >= 0; i--)
		__free_page(sg_page(&sg[i]));
	kfree(cmd->saved_iter_addr);
	return -ENOMEM;
}

static int
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			  struct scatterlist *sg, int sg_count, bool is_prot)
{
	struct scatterlist *p = sg;
	size_t revert_bytes;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
		if (ret < 0) {
			revert_bytes = 0;

			while (p < sg) {
				struct page *page = sg_page(p);

				if (page) {
					put_page(page);
					revert_bytes += p->length;
				}
				p++;
			}

			iov_iter_revert(iter, revert_bytes);
			return ret;
		}
		sg += ret;
	}

	return 0;
}

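/*
 * Build the protection and data scatterlists for a command. If direct
 * mapping of the data payload fails with -EINVAL because the guest's vecs
 * are not sector aligned, fall back to the bounce-buffer copy path.
 */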
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
						cmd->tvc_prot_sgl,
						cmd->tvc_prot_sgl_count, true);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		  cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
					cmd->tvc_sgl_count, false);
	if (ret == -EINVAL) {
		sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
						 cmd->tvc_sgl_count);
	}

	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

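/*
 * Hand a fully initialized command off to LIO for execution. The response
 * is sent to the guest later, from the completion work, once TCM has
 * finished the command and dropped its reference.
 */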
static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for an
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}

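/*
 * Bytes 2 and 3 of the virtio-scsi LUN field hold the LUN in SAM flat
 * addressing format; masking with 0x3FFF strips the address method bits and
 * leaves the 14-bit LUN.
 */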
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

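/*
 * Service one kick of a request (I/O) virtqueue: pop descriptors, parse the
 * virtio-scsi command header, map or bounce the data payload, and queue
 * each command to LIO. vhost_exceeds_weight() stops the loop after
 * VHOST_SCSI_WEIGHT requests so other virtqueues are not starved.
 */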
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, i, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If a T10_PI header + payload is present, set up prot_iter
		 * and recalculate data_iter for vhost_scsi_mapal(), which
		 * maps the payload to host scatterlists via
		 * iov_iter_get_pages2().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		for (i = 0; i < vc.in; i++)
			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(cmd);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	struct vhost_virtqueue *ctl_vq, *vq;
	int resp_code, i;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE) {
		/*
		 * Flush IO vqs that don't share a worker with the ctl to make
		 * sure they have sent their responses before us.
		 */
		ctl_vq = &tmf->vhost->vqs[VHOST_SCSI_VQ_CTL].vq;
		for (i = VHOST_SCSI_VQ_IO; i < tmf->vhost->dev.nvqs; i++) {
			vq = &tmf->vhost->vqs[i].vq;

			if (vhost_vq_is_setup(vq) &&
			    vq->worker != ctl_vq->worker)
				vhost_vq_flush(vq);
		}

		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	} else {
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
	}

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}

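/*
 * Handle a VIRTIO_SCSI_T_TMF control request. Only LUN RESET is supported;
 * everything else is rejected immediately. On success the response is sent
 * from vhost_scsi_tmf_resp_work() once LIO completes the TMR.
 */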
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		goto send_reject;

	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}

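/*
 * Allocate an event, encode the (tpg, lun) pair in virtio-scsi LUN format
 * when one is given, and queue it for delivery to the guest from vhost
 * worker context.
 */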
static void
vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_tpg *tpg, struct se_lun *lun,
		    u32 event, u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_vq_work_queue(vq, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
				    0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, vs->old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < vs->dev.nvqs; i++)
		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < vs->dev.nvqs; i++)
		wait_for_completion(&vs->old_inflight[i]->comp);
}

static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
		kfree(tv_cmd->tvc_resp_iov);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}

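/*
 * Pre-allocate the per-vq command pool and tag bitmap, along with each
 * command's scatterlist, page pointer, and response iovec arrays, so that
 * the normal I/O path avoids per-request allocations.
 */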
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
					       sizeof(struct iovec),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_resp_iov) {
			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 *  The lock nesting rule is:
 *    vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	mutex_lock(&vhost_scsi_mutex);
	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				ret = -EEXIST;
				goto undepend;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&vhost_scsi_mutex);

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			mutex_lock(&tpg->tv_tpg_mutex);
			tpg->vhost_scsi = NULL;
			tpg->tv_tpg_vhost_count--;
			mutex_unlock(&tpg->tv_tpg_mutex);
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}

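/*
 * Tear down the vhost <-> target association set up by
 * vhost_scsi_set_endpoint(): clear the vq backends so no new commands
 * start, flush outstanding requests, free the per-vq command pools, and
 * drop the configfs dependency taken on each tpg.
 */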
1790static int
1791vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1792			  struct vhost_scsi_target *t)
1793{
1794	struct se_portal_group *se_tpg;
1795	struct vhost_scsi_tport *tv_tport;
1796	struct vhost_scsi_tpg *tpg;
1797	struct vhost_virtqueue *vq;
1798	bool match = false;
1799	int index, ret, i;
1800	u8 target;
1801
1802	mutex_lock(&vs->dev.mutex);
1803	/* Verify that ring has been setup correctly. */
1804	for (index = 0; index < vs->dev.nvqs; ++index) {
1805		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1806			ret = -EFAULT;
1807			goto err_dev;
1808		}
1809	}
1810
1811	if (!vs->vs_tpg) {
1812		ret = 0;
1813		goto err_dev;
1814	}
1815
1816	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1817		target = i;
1818		tpg = vs->vs_tpg[target];
1819		if (!tpg)
1820			continue;
1821
1822		tv_tport = tpg->tport;
1823		if (!tv_tport) {
1824			ret = -ENODEV;
1825			goto err_dev;
1826		}
1827
1828		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1829			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1830				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1831				tv_tport->tport_name, tpg->tport_tpgt,
1832				t->vhost_wwpn, t->vhost_tpgt);
1833			ret = -EINVAL;
1834			goto err_dev;
1835		}
1836		match = true;
1837	}
1838	if (!match)
1839		goto free_vs_tpg;
1840
1841	/* Prevent new cmds from starting and accessing the tpgs/sessions */
1842	for (i = 0; i < vs->dev.nvqs; i++) {
1843		vq = &vs->vqs[i].vq;
1844		mutex_lock(&vq->mutex);
1845		vhost_vq_set_backend(vq, NULL);
1846		mutex_unlock(&vq->mutex);
1847	}
1848	/* Make sure cmds are not running before tearing them down. */
1849	vhost_scsi_flush(vs);
1850
1851	for (i = 0; i < vs->dev.nvqs; i++) {
1852		vq = &vs->vqs[i].vq;
1853		vhost_scsi_destroy_vq_cmds(vq);
1854	}
1855
1856	/*
1857	 * We can now release our hold on the tpg and sessions and userspace
1858	 * can free them after this point.
1859	 */
1860	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1861		target = i;
1862		tpg = vs->vs_tpg[target];
1863		if (!tpg)
1864			continue;
1865
1866		mutex_lock(&tpg->tv_tpg_mutex);
1867
1868		tpg->tv_tpg_vhost_count--;
1869		tpg->vhost_scsi = NULL;
1870		vs->vs_tpg[target] = NULL;
1871
1872		mutex_unlock(&tpg->tv_tpg_mutex);
1873
1874		se_tpg = &tpg->se_tpg;
1875		target_undepend_item(&se_tpg->tpg_group.cg_item);
1876	}
1877
1878free_vs_tpg:
1879	/*
1880	 * Act as synchronize_rcu to make sure access to
1881	 * old vs->vs_tpg is finished.
1882	 */
1883	vhost_scsi_flush(vs);
1884	kfree(vs->vs_tpg);
1885	vs->vs_tpg = NULL;
1886	WARN_ON(vs->vs_events_nr);
1887	mutex_unlock(&vs->dev.mutex);
1888	return 0;
1889
1890err_dev:
1891	mutex_unlock(&vs->dev.mutex);
1892	return ret;
1893}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
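
/*
 * Usage sketch (userspace, illustrative only): the usual negotiation reads
 * the supported mask with VHOST_GET_FEATURES, intersects it with what the
 * caller implements, and writes the result back; any bit outside
 * VHOST_SCSI_FEATURES is rejected above with -EOPNOTSUPP.
 *
 *	__u64 features;
 *
 *	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0)
 *		err(1, "VHOST_GET_FEATURES");
 *	features &= wanted;	(wanted: the caller-supported subset)
 *	if (ioctl(vhost_fd, VHOST_SET_FEATURES, &features) < 0)
 *		err(1, "VHOST_SET_FEATURES");
 */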

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi_virtqueue *svq;
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;

	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		goto err_vs;

	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
		       VHOST_SCSI_MAX_IO_VQ);
		nvqs = VHOST_SCSI_MAX_IO_VQ;
	} else if (nvqs == 0) {
		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
		nvqs = 1;
	}
	nvqs += VHOST_SCSI_VQ_IO;

	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
					 GFP_KERNEL | __GFP_ZERO);
	if (!vs->old_inflight)
		goto err_inflight;

	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
				GFP_KERNEL | __GFP_ZERO);
	if (!vs->vqs)
		goto err_vqs;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_local_vqs;

	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
		svq = &vs->vqs[i];

		vqs[i] = &svq->vq;
		svq->vs = vs;
		init_llist_head(&svq->completion_list);
		vhost_work_init(&svq->completion_work,
				vhost_scsi_complete_cmd_work);
		svq->vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_local_vqs:
	kfree(vs->vqs);
err_vqs:
	kfree(vs->old_inflight);
err_inflight:
	kvfree(vs);
err_vs:
	return r;
}
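
/*
 * Usage sketch (userspace, illustrative only): each open() of
 * /dev/vhost-scsi creates an independent vhost_scsi instance, and the fd
 * must take ownership before any of the setup ioctls handled below.
 *
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	if (vhost_fd < 0)
 *		err(1, "open /dev/vhost-scsi");
 *	if (ioctl(vhost_fd, VHOST_SET_OWNER) < 0)
 *		err(1, "VHOST_SET_OWNER");
 */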

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	kfree(vs->dev.vqs);
	kfree(vs->vqs);
	kfree(vs->old_inflight);
	kvfree(vs);
	return 0;
}

static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	case VHOST_NEW_WORKER:
	case VHOST_FREE_WORKER:
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		mutex_lock(&vs->dev.mutex);
		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
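
/*
 * Usage sketch (userspace, illustrative only): the events-missed flag is a
 * plain u32. A monitor that drained the event ring too slowly can poll it
 * and clear it once the guest has been told to rescan.
 *
 *	__u32 missed;
 *
 *	if (ioctl(vhost_fd, VHOST_SCSI_GET_EVENTS_MISSED, &missed) < 0)
 *		err(1, "VHOST_SCSI_GET_EVENTS_MISSED");
 *	if (missed) {
 *		(tell the guest to rescan its targets here)
 *		missed = 0;
 *		if (ioctl(vhost_fd, VHOST_SCSI_SET_EVENTS_MISSED, &missed) < 0)
 *			err(1, "VHOST_SCSI_SET_EVENTS_MISSED");
 *	}
 */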

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}

static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	/*
	 * We can't queue events if the backend has been cleared, because
	 * we could end up queueing an event after the flush.
	 */
	if (!vhost_vq_get_backend(vq))
		goto unlock;

	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, vq, tpg, lun,
				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
unlock:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}

static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	vhost_scsi_hotplug(tpg, lun);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	vhost_scsi_hotunplug(tpg, lun);
	mutex_unlock(&tpg->tv_tpg_mutex);
}
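
/*
 * Configfs sketch (illustrative only; the WWPN, TPG and backstore names
 * are hypothetical): symlinking a LUN into the TPG ends up in
 * vhost_scsi_port_link() above and, when VIRTIO_SCSI_F_HOTPLUG was
 * negotiated, raises a RESCAN transport event in the guest; removing the
 * symlink raises a REMOVED event via vhost_scsi_port_unlink().
 *
 *	cd /sys/kernel/config/target/vhost/naa.550000000000000a/tpgt_1
 *	mkdir lun/lun_0
 *	ln -s /sys/kernel/config/target/core/fileio_0/disk0 lun/lun_0
 *	rm lun/lun_0/disk0
 */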

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
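
/*
 * Configfs sketch (illustrative only, hypothetical tport/TPG names): the
 * store handler above accepts DIF protection types 0, 1 and 3, so e.g.:
 *
 *	echo 1 > /sys/kernel/config/target/vhost/naa.550000000000000a/tpgt_1/attrib/fabric_prot_type
 *	cat /sys/kernel/config/target/vhost/naa.550000000000000a/tpgt_1/attrib/fabric_prot_type
 */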

static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}

static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = sysfs_emit(page, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shut down the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
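
/*
 * Configfs sketch (illustrative only, hypothetical names): writing an
 * initiator-port WWN whose prefix matches the tport's protocol creates
 * the I_T nexus; writing "NULL" tears it down again through
 * vhost_scsi_drop_nexus().
 *
 *	echo -n naa.55000000000000b0 > \
 *		/sys/kernel/config/target/vhost/naa.550000000000000a/tpgt_1/nexus
 *	echo -n NULL > \
 *		/sys/kernel/config/target/vhost/naa.550000000000000a/tpgt_1/nexus
 */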

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};

static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);

	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}

static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport\n");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}
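
/*
 * Configfs sketch (illustrative only, hypothetical WWPN): creating the
 * directories below invokes vhost_scsi_make_tport() above and then
 * vhost_scsi_make_tpg(); the name prefix ("naa.", "fc." or "iqn.")
 * selects the emulated protocol identifier.
 *
 *	mkdir -p /sys/kernel/config/target/vhost/naa.550000000000000a/tpgt_1
 */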

static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "vhost",
	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
	.release_cmd			= vhost_scsi_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= vhost_scsi_write_pending,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,

	.default_submit_type		= TARGET_QUEUE_SUBMIT,
	.direct_submit_supp		= 1,
};

static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);