1/*	$OpenBSD: vscsi.c,v 1.63 2024/05/13 01:15:50 jsg Exp $ */
2
3/*
4 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/param.h>
20#include <sys/systm.h>
21#include <sys/kernel.h>
22#include <sys/malloc.h>
23#include <sys/device.h>
24#include <sys/conf.h>
25#include <sys/queue.h>
26#include <sys/rwlock.h>
27#include <sys/pool.h>
28#include <sys/task.h>
29#include <sys/ioctl.h>
30#include <sys/event.h>
31
32#include <scsi/scsi_all.h>
33#include <scsi/scsiconf.h>
34
35#include <dev/vscsivar.h>
36
37/*
38 * Locks used to protect struct members and global data
39 *	s	sc_state_mtx
40 */
41
42int		vscsi_match(struct device *, void *, void *);
43void		vscsi_attach(struct device *, struct device *, void *);
44
/*
 * Per-command context handed back and forth between the SCSI midlayer
 * and the userland target emulator.  ccbs are allocated from
 * sc_ccb_pool and move along the i2t (initiator-to-target) and t2i
 * (target-to-initiator) lists as the command progresses.
 */
struct vscsi_ccb {
	TAILQ_ENTRY(vscsi_ccb)	ccb_entry;	/* linkage on i2t/t2i lists */
	int			ccb_tag;	/* id userland echoes back */
	struct scsi_xfer	*ccb_xs;	/* in-flight xfer; cleared to wake pollers */
	size_t			ccb_datalen;	/* bytes moved so far via VSCSI_DATA_* */
};
51
TAILQ_HEAD(vscsi_ccb_list, vscsi_ccb);

/*
 * Device lifecycle: CLOSED until userland opens /dev/vscsi, CONFIG
 * while open/close is setting things up or tearing them down, and
 * RUNNING while the emulator is servicing commands.
 */
enum vscsi_state {
	VSCSI_S_CLOSED,
	VSCSI_S_CONFIG,
	VSCSI_S_RUNNING
};
59
struct vscsi_softc {
	struct device		sc_dev;
	struct scsibus_softc	*sc_scsibus;	/* child bus from config_found() */

	struct mutex		sc_state_mtx;
	enum vscsi_state	sc_state;	/* [s] */
	u_int			sc_ref_count;	/* [s] outstanding scsi_link refs */
	struct pool		sc_ccb_pool;	/* exists only while open */

	struct scsi_iopool	sc_iopool;

	struct vscsi_ccb_list	sc_ccb_i2t;	/* [s] */
	struct vscsi_ccb_list	sc_ccb_t2i;	/* serialized by sc_ioc_lock */
	int			sc_ccb_tag;	/* next tag to hand out */
	struct mutex		sc_poll_mtx;	/* sleep/wakeup for SCSI_POLL xfers */
	struct rwlock		sc_ioc_lock;	/* serializes ioctl handlers */

	struct klist		sc_klist;	/* [s] */
};
79
#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
/* map a dev_t to its softc; takes a device ref the caller must drop */
#define DEV2SC(_d) ((struct vscsi_softc *)device_lookup(&vscsi_cd, minor(_d)))
82
/* autoconf glue: vscsi always matches and attaches a scsibus child */
const struct cfattach vscsi_ca = {
	sizeof(struct vscsi_softc),
	vscsi_match,
	vscsi_attach
};

struct cfdriver vscsi_cd = {
	NULL,
	"vscsi",
	DV_DULL
};
94
95void		vscsi_cmd(struct scsi_xfer *);
96int		vscsi_probe(struct scsi_link *);
97void		vscsi_free(struct scsi_link *);
98
/* adapter entry points handed to the SCSI midlayer */
const struct scsi_adapter vscsi_switch = {
	vscsi_cmd, NULL, vscsi_probe, vscsi_free, NULL
};
102
103int		vscsi_i2t(struct vscsi_softc *, struct vscsi_ioc_i2t *);
104int		vscsi_data(struct vscsi_softc *, struct vscsi_ioc_data *, int);
105int		vscsi_t2i(struct vscsi_softc *, struct vscsi_ioc_t2i *);
106int		vscsi_devevent(struct vscsi_softc *, u_long,
107		    struct vscsi_ioc_devevent *);
108void		vscsi_devevent_task(void *);
109void		vscsi_done(struct vscsi_softc *, struct vscsi_ccb *);
110
111void *		vscsi_ccb_get(void *);
112void		vscsi_ccb_put(void *, void *);
113
114void		filt_vscsidetach(struct knote *);
115int		filt_vscsiread(struct knote *, long);
116int		filt_vscsimodify(struct kevent *, struct knote *);
117int		filt_vscsiprocess(struct knote *, struct kevent *);
118
/* EVFILT_READ filter: fires when an i2t ccb is queued for userland */
const struct filterops vscsi_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_vscsidetach,
	.f_event	= filt_vscsiread,
	.f_modify	= filt_vscsimodify,
	.f_process	= filt_vscsiprocess,
};
127
128
/* pseudo-device: always matches */
int
vscsi_match(struct device *parent, void *match, void *aux)
{
	return (1);
}
134
/*
 * Attach: initialize locks, queues and the ccb iopool, then attach a
 * scsibus below us.  The ccb pool itself is created lazily in
 * vscsiopen() so a closed device costs nothing.
 */
void
vscsi_attach(struct device *parent, struct device *self, void *aux)
{
	struct vscsi_softc		*sc = (struct vscsi_softc *)self;
	struct scsibus_attach_args	saa;

	printf("\n");

	/* IPL_MPFLOOR because the klist shares this mutex */
	mtx_init(&sc->sc_state_mtx, IPL_MPFLOOR);
	sc->sc_state = VSCSI_S_CLOSED;

	TAILQ_INIT(&sc->sc_ccb_i2t);
	TAILQ_INIT(&sc->sc_ccb_t2i);
	mtx_init(&sc->sc_poll_mtx, IPL_BIO);
	rw_init(&sc->sc_ioc_lock, "vscsiioc");
	scsi_iopool_init(&sc->sc_iopool, sc, vscsi_ccb_get, vscsi_ccb_put);
	klist_init_mutex(&sc->sc_klist, &sc->sc_state_mtx);

	saa.saa_adapter = &vscsi_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = 256;
	saa.saa_luns = 8;
	saa.saa_openings = 16;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}
166
/*
 * Midlayer command entry point.  Queue the xfer on the i2t list for
 * the userland emulator and notify any kevent waiters.  If the device
 * is not RUNNING the command fails immediately.  SCSI_POLL commands
 * sleep here until vscsi_t2i() completes them, so POLL together with
 * NOSLEEP cannot be honoured.
 */
void
vscsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
	struct vscsi_ccb		*ccb = xs->io;
	int				polled = ISSET(xs->flags, SCSI_POLL);
	int				running = 0;

	if (ISSET(xs->flags, SCSI_POLL) && ISSET(xs->flags, SCSI_NOSLEEP)) {
		printf("%s: POLL && NOSLEEP for 0x%02x\n", DEVNAME(sc),
		    xs->cmd.opcode);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb->ccb_xs = xs;

	/* queue and wake readers atomically wrt state changes */
	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state == VSCSI_S_RUNNING) {
		running = 1;
		TAILQ_INSERT_TAIL(&sc->sc_ccb_i2t, ccb, ccb_entry);
	}
	knote_locked(&sc->sc_klist, 0);
	mtx_leave(&sc->sc_state_mtx);

	if (!running) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	if (polled) {
		/* vscsi_done() clears ccb_xs and wakes us */
		mtx_enter(&sc->sc_poll_mtx);
		while (ccb->ccb_xs != NULL)
			msleep_nsec(ccb, &sc->sc_poll_mtx, PRIBIO, "vscsipoll",
			    INFSLP);
		mtx_leave(&sc->sc_poll_mtx);
		scsi_done(xs);
	}
}
209
210void
211vscsi_done(struct vscsi_softc *sc, struct vscsi_ccb *ccb)
212{
213	struct scsi_xfer		*xs = ccb->ccb_xs;
214
215	if (ISSET(xs->flags, SCSI_POLL)) {
216		mtx_enter(&sc->sc_poll_mtx);
217		ccb->ccb_xs = NULL;
218		wakeup(ccb);
219		mtx_leave(&sc->sc_poll_mtx);
220	} else
221		scsi_done(xs);
222}
223
224int
225vscsi_probe(struct scsi_link *link)
226{
227	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;
228	int				rv = 0;
229
230	mtx_enter(&sc->sc_state_mtx);
231	if (sc->sc_state == VSCSI_S_RUNNING)
232		sc->sc_ref_count++;
233	else
234		rv = ENXIO;
235	mtx_leave(&sc->sc_state_mtx);
236
237	return (rv);
238}
239
/*
 * Midlayer free hook: drop a scsi_link reference.  If the device is
 * being torn down (state left RUNNING) and this was the last ref,
 * wake vscsiclose() which is sleeping on sc_ref_count.
 */
void
vscsi_free(struct scsi_link *link)
{
	struct vscsi_softc		*sc = link->bus->sb_adapter_softc;

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_ref_count--;
	if (sc->sc_state != VSCSI_S_RUNNING && sc->sc_ref_count == 0)
		wakeup(&sc->sc_ref_count);
	mtx_leave(&sc->sc_state_mtx);
}
251
/*
 * Open /dev/vscsi: move CLOSED -> CONFIG (only one opener at a time),
 * create and prime the ccb pool, then commit to RUNNING.  If priming
 * fails we fall back to CLOSED and return the error.
 */
int
vscsiopen(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	enum vscsi_state		state = VSCSI_S_RUNNING;
	int				rv = 0;

	if (sc == NULL)
		return (ENXIO);

	/* claim the device; CONFIG blocks concurrent opens */
	mtx_enter(&sc->sc_state_mtx);
	if (sc->sc_state != VSCSI_S_CLOSED)
		rv = EBUSY;
	else
		sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	if (rv != 0) {
		device_unref(&sc->sc_dev);
		return (rv);
	}

	pool_init(&sc->sc_ccb_pool, sizeof(struct vscsi_ccb), 0, IPL_BIO, 0,
	    "vscsiccb", NULL);

	/* we need to guarantee some ccbs will be available for the iopool */
	rv = pool_prime(&sc->sc_ccb_pool, 8);
	if (rv != 0) {
		pool_destroy(&sc->sc_ccb_pool);
		state = VSCSI_S_CLOSED;
	}

	/* commit changes */
	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = state;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (rv);
}
292
/*
 * ioctl dispatch for the userland target emulator.  sc_ioc_lock
 * serializes all handlers, which keeps the t2i list and per-ccb
 * data offsets consistent without extra locking.
 */
int
vscsiioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	int				read = 0;
	int				err = 0;

	if (sc == NULL)
		return (ENXIO);

	rw_enter_write(&sc->sc_ioc_lock);

	switch (cmd) {
	case VSCSI_I2T:
		err = vscsi_i2t(sc, (struct vscsi_ioc_i2t *)addr);
		break;

	case VSCSI_DATA_READ:
		read = 1;
		/* FALLTHROUGH */
	case VSCSI_DATA_WRITE:
		err = vscsi_data(sc, (struct vscsi_ioc_data *)addr, read);
		break;

	case VSCSI_T2I:
		err = vscsi_t2i(sc, (struct vscsi_ioc_t2i *)addr);
		break;

	case VSCSI_REQPROBE:
	case VSCSI_REQDETACH:
		err = vscsi_devevent(sc, cmd,
		    (struct vscsi_ioc_devevent *)addr);
		break;

	default:
		err = ENOTTY;
		break;
	}

	rw_exit_write(&sc->sc_ioc_lock);

	device_unref(&sc->sc_dev);
	return (err);
}
336
/*
 * VSCSI_I2T: hand the oldest queued command to userland.  The ccb is
 * moved from the i2t list (protected by sc_state_mtx) to the t2i list
 * (protected by the caller's sc_ioc_lock), where it waits for data
 * transfers and final completion.  Returns EAGAIN when no command is
 * pending.
 */
int
vscsi_i2t(struct vscsi_softc *sc, struct vscsi_ioc_i2t *i2t)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	struct scsi_link		*link;

	mtx_enter(&sc->sc_state_mtx);
	ccb = TAILQ_FIRST(&sc->sc_ccb_i2t);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
	mtx_leave(&sc->sc_state_mtx);

	if (ccb == NULL)
		return (EAGAIN);

	xs = ccb->ccb_xs;
	link = xs->sc_link;

	/* describe the command to userland */
	i2t->tag = ccb->ccb_tag;
	i2t->target = link->target;
	i2t->lun = link->lun;
	memcpy(&i2t->cmd, &xs->cmd, xs->cmdlen);
	i2t->cmdlen = xs->cmdlen;
	i2t->datalen = xs->datalen;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		i2t->direction = VSCSI_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		i2t->direction = VSCSI_DIR_WRITE;
		break;
	default:
		i2t->direction = VSCSI_DIR_NONE;
		break;
	}

	TAILQ_INSERT_TAIL(&sc->sc_ccb_t2i, ccb, ccb_entry);

	return (0);
}
379
/*
 * VSCSI_DATA_READ/WRITE: move a chunk of command data between the
 * xfer's buffer and userland.  The ccb is looked up by tag on the t2i
 * list; the requested length must fit in the remaining data and the
 * direction must match the xfer's.  ccb_datalen tracks progress so
 * userland may transfer in multiple chunks.
 */
int
vscsi_data(struct vscsi_softc *sc, struct vscsi_ioc_data *data, int read)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				xsread;
	u_int8_t			*buf;
	int				rv = EINVAL;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == data->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	xs = ccb->ccb_xs;

	/* don't let userland overrun the xfer's buffer */
	if (data->datalen > xs->datalen - ccb->ccb_datalen)
		return (ENOMEM);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xsread = 1;
		break;
	case SCSI_DATA_OUT:
		xsread = 0;
		break;
	default:
		return (EINVAL);
	}

	if (read != xsread)
		return (EINVAL);

	buf = xs->data;
	buf += ccb->ccb_datalen;

	if (read)
		rv = copyin(data->data, buf, data->datalen);
	else
		rv = copyout(buf, data->data, data->datalen);

	if (rv == 0)
		ccb->ccb_datalen += data->datalen;

	return (rv);
}
428
/*
 * VSCSI_T2I: userland reports final status for a command.  Look the
 * ccb up by tag, translate the emulator's status into midlayer error
 * codes (copying sense data when present) and complete the xfer.
 */
int
vscsi_t2i(struct vscsi_softc *sc, struct vscsi_ioc_t2i *t2i)
{
	struct vscsi_ccb		*ccb;
	struct scsi_xfer		*xs;
	int				rv = 0;

	TAILQ_FOREACH(ccb, &sc->sc_ccb_t2i, ccb_entry) {
		if (ccb->ccb_tag == t2i->tag)
			break;
	}
	if (ccb == NULL)
		return (EFAULT);

	TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);

	xs = ccb->ccb_xs;

	/* resid is whatever userland didn't transfer */
	xs->resid = xs->datalen - ccb->ccb_datalen;
	xs->status = SCSI_OK;

	switch (t2i->status) {
	case VSCSI_STAT_DONE:
		xs->error = XS_NOERROR;
		break;
	case VSCSI_STAT_SENSE:
		xs->error = XS_SENSE;
		memcpy(&xs->sense, &t2i->sense, sizeof(xs->sense));
		break;
	case VSCSI_STAT_RESET:
		xs->error = XS_RESET;
		break;
	case VSCSI_STAT_ERR:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	vscsi_done(sc, ccb);

	return (rv);
}
471
/* deferred probe/detach request, executed on the system task queue */
struct vscsi_devevent_task {
	struct vscsi_softc *sc;			/* referenced softc */
	struct task t;
	struct vscsi_ioc_devevent de;		/* copy of userland request */
	u_long cmd;				/* VSCSI_REQPROBE or VSCSI_REQDETACH */
};
478
479int
480vscsi_devevent(struct vscsi_softc *sc, u_long cmd,
481    struct vscsi_ioc_devevent *de)
482{
483	struct vscsi_devevent_task *dt;
484
485	dt = malloc(sizeof(*dt), M_TEMP, M_WAITOK | M_CANFAIL);
486	if (dt == NULL)
487		return (ENOMEM);
488
489	task_set(&dt->t, vscsi_devevent_task, dt);
490	dt->sc = sc;
491	dt->de = *de;
492	dt->cmd = cmd;
493
494	device_ref(&sc->sc_dev);
495	task_add(systq, &dt->t);
496
497	return (0);
498}
499
/*
 * Task-queue half of vscsi_devevent(): perform the probe or detach if
 * the device is still RUNNING, then drop the device ref and free the
 * request.  Note the state is only a snapshot; the close path's forced
 * detach makes a stale RUNNING observation harmless.
 */
void
vscsi_devevent_task(void *xdt)
{
	struct vscsi_devevent_task *dt = xdt;
	struct vscsi_softc *sc = dt->sc;
	int state;

	mtx_enter(&sc->sc_state_mtx);
	state = sc->sc_state;
	mtx_leave(&sc->sc_state_mtx);

	if (state != VSCSI_S_RUNNING)
		goto gone;

	switch (dt->cmd) {
	case VSCSI_REQPROBE:
		scsi_probe(sc->sc_scsibus, dt->de.target, dt->de.lun);
		break;
	case VSCSI_REQDETACH:
		scsi_detach(sc->sc_scsibus, dt->de.target, dt->de.lun,
		    DETACH_FORCE);
		break;
#ifdef DIAGNOSTIC
	default:
		panic("unexpected vscsi_devevent cmd");
		/* NOTREACHED */
#endif
	}

gone:
	device_unref(&sc->sc_dev);

	free(dt, M_TEMP, sizeof(*dt));
}
534
535int
536vscsikqfilter(dev_t dev, struct knote *kn)
537{
538	struct vscsi_softc *sc = DEV2SC(dev);
539
540	if (sc == NULL)
541		return (ENXIO);
542
543	switch (kn->kn_filter) {
544	case EVFILT_READ:
545		kn->kn_fop = &vscsi_filtops;
546		break;
547	default:
548		device_unref(&sc->sc_dev);
549		return (EINVAL);
550	}
551
552	kn->kn_hook = sc;
553	klist_insert(&sc->sc_klist, kn);
554
555	/* device ref is given to the knote in the klist */
556
557	return (0);
558}
559
/* knote detach: unhook from the klist and drop the device ref */
void
filt_vscsidetach(struct knote *kn)
{
	struct vscsi_softc *sc = kn->kn_hook;

	klist_remove(&sc->sc_klist, kn);
	device_unref(&sc->sc_dev);
}
568
/*
 * Readable when a command is waiting on the i2t list.  Called with
 * sc_state_mtx held via the klist, so the list check is safe.
 */
int
filt_vscsiread(struct knote *kn, long hint)
{
	struct vscsi_softc *sc = kn->kn_hook;

	return (!TAILQ_EMPTY(&sc->sc_ccb_i2t));
}
576
/* run knote_modify() with the klist's mutex held for filter safety */
int
filt_vscsimodify(struct kevent *kev, struct knote *kn)
{
	struct vscsi_softc *sc = kn->kn_hook;
	int active;

	mtx_enter(&sc->sc_state_mtx);
	active = knote_modify(kev, kn);
	mtx_leave(&sc->sc_state_mtx);

	return (active);
}
589
/* run knote_process() with the klist's mutex held for filter safety */
int
filt_vscsiprocess(struct knote *kn, struct kevent *kev)
{
	struct vscsi_softc *sc = kn->kn_hook;
	int active;

	mtx_enter(&sc->sc_state_mtx);
	active = knote_process(kn, kev);
	mtx_leave(&sc->sc_state_mtx);

	return (active);
}
602
/*
 * Close /dev/vscsi: move RUNNING -> CONFIG so no new commands or
 * links appear, abort everything in flight with XS_RESET, forcibly
 * detach all devices on the bus, wait for the last scsi_link ref to
 * drain, then destroy the ccb pool and return to CLOSED.
 */
int
vscsiclose(dev_t dev, int flags, int mode, struct proc *p)
{
	struct vscsi_softc		*sc = DEV2SC(dev);
	struct vscsi_ccb		*ccb;

	if (sc == NULL)
		return (ENXIO);

	mtx_enter(&sc->sc_state_mtx);
	KASSERT(sc->sc_state == VSCSI_S_RUNNING);
	sc->sc_state = VSCSI_S_CONFIG;
	mtx_leave(&sc->sc_state_mtx);

	scsi_activate(sc->sc_scsibus, -1, -1, DVACT_DEACTIVATE);

	/* fail commands userland had accepted but not completed */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_t2i)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_t2i, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	/* fail commands userland never picked up */
	while ((ccb = TAILQ_FIRST(&sc->sc_ccb_i2t)) != NULL) {
		TAILQ_REMOVE(&sc->sc_ccb_i2t, ccb, ccb_entry);
		ccb->ccb_xs->error = XS_RESET;
		vscsi_done(sc, ccb);
	}

	scsi_req_detach(sc->sc_scsibus, -1, -1, DETACH_FORCE);

	/* wait for vscsi_free() to drop the last link reference */
	mtx_enter(&sc->sc_state_mtx);
	while (sc->sc_ref_count > 0) {
		msleep_nsec(&sc->sc_ref_count, &sc->sc_state_mtx,
		    PRIBIO, "vscsiref", INFSLP);
	}
	mtx_leave(&sc->sc_state_mtx);

	pool_destroy(&sc->sc_ccb_pool);

	mtx_enter(&sc->sc_state_mtx);
	sc->sc_state = VSCSI_S_CLOSED;
	mtx_leave(&sc->sc_state_mtx);

	device_unref(&sc->sc_dev);
	return (0);
}
649
650void *
651vscsi_ccb_get(void *cookie)
652{
653	struct vscsi_softc		*sc = cookie;
654	struct vscsi_ccb		*ccb = NULL;
655
656	ccb = pool_get(&sc->sc_ccb_pool, PR_NOWAIT);
657	if (ccb != NULL) {
658		ccb->ccb_tag = sc->sc_ccb_tag++;
659		ccb->ccb_datalen = 0;
660	}
661
662	return (ccb);
663}
664
665void
666vscsi_ccb_put(void *cookie, void *io)
667{
668	struct vscsi_softc		*sc = cookie;
669	struct vscsi_ccb		*ccb = io;
670
671	pool_put(&sc->sc_ccb_pool, ccb);
672}
673