/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

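/*
 * Per-I/O synchronization state shared between the submitting thread in
 * cfi_submit_wait() and the CTL callbacks cfi_datamove()/cfi_done().  The
 * submitter sleeps on the condition variable until CTL either asks for a
 * data move or reports completion.
 */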
typedef enum {
	CTL_IOCTL_INPROG,
	CTL_IOCTL_DATAMOVE,
	CTL_IOCTL_DONE
} ctl_fe_ioctl_state;

struct ctl_fe_ioctl_params {
	struct cv		sem;
	struct mtx		ioctl_mtx;
	ctl_fe_ioctl_state	state;
};

struct cfi_port {
	TAILQ_ENTRY(cfi_port)	link;
	uint32_t		cur_tag_num;
	struct cdev *		dev;
	struct ctl_port		port;
};

struct cfi_softc {
	TAILQ_HEAD(, cfi_port)	ports;
};


static struct cfi_softc cfi_softc;


static int cfi_init(void);
static int cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);
static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static void cfi_ioctl_port_create(struct ctl_req *req);
static void cfi_ioctl_port_remove(struct ctl_req *req);

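/*
 * Character device switch for the /dev/cam/ctl<pp>.<vp> nodes created by
 * cfi_ioctl_port_create(); all requests are handled by ctl_ioctl_io().
 */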
static struct cdevsw cfi_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_ioctl = ctl_ioctl_io
};

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.ioctl = cfi_ioctl,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

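/*
 * Frontend init: create and register the default ioctl port (physical and
 * virtual port 0).  This port gets no device node of its own; it is the
 * one selected by ctl_ioctl_io() when si_drv2 is NULL.
 */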
static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	int error = 0;

	memset(isoftc, 0, sizeof(*isoftc));
	TAILQ_INIT(&isoftc->ports);

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = 0;
	port->targ_port = -1;

	if ((error = ctl_port_register(port)) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		free(cfi, M_CTL);
		return (error);
	}

	ctl_port_online(port);
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
	return (0);
}

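/*
 * Frontend shutdown: take every remaining ioctl port offline, deregister
 * it and free it.
 */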
static int
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi, *temp;
	struct ctl_port *port;
	int error;

	TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) {
		port = &cfi->port;
		ctl_port_offline(port);
		error = ctl_port_deregister(port);
		if (error != 0) {
			printf("%s: ctl_port_deregister() failed\n",
			   __func__);
			return (error);
		}

		TAILQ_REMOVE(&isoftc->ports, cfi, link);
		free(cfi, M_CTL);
	}

	return (0);
}

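/*
 * Handle a CTL_REQ_CREATE request: take the physical/virtual port numbers
 * from the optional "pp"/"vp" arguments (or pick the next free physical
 * port), register a new CTL port and create its /dev/cam/ctl<pp>.<vp>
 * device node.
 */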
static void
cfi_ioctl_port_create(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	struct make_dev_args args;
	const char *val;
	int retval;
	int pp = -1, vp = 0;

	val = dnvlist_get_string(req->args_nvl, "pp", NULL);
	if (val != NULL)
		pp = strtol(val, NULL, 10);

	val = dnvlist_get_string(req->args_nvl, "vp", NULL);
	if (val != NULL)
		vp = strtol(val, NULL, 10);

	if (pp != -1) {
		/* Check for duplicates */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			if (pp == cfi->port.physical_port &&
			    vp == cfi->port.virtual_port) {
				req->status = CTL_LUN_ERROR;
				snprintf(req->error_str, sizeof(req->error_str),
				    "port %d already exists", pp);

				return;
			}
		}
	} else {
		/* Find free port number */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			pp = MAX(pp, cfi->port.physical_port);
		}

		pp++;
	}

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = pp;
	port->virtual_port = vp;
	port->targ_port = -1;

	retval = ctl_port_register(port);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "ctl_port_register() failed with error %d", retval);
		free(cfi, M_CTL);
		return;
	}

	req->result_nvl = nvlist_create(0);
	nvlist_add_number(req->result_nvl, "port_id", port->targ_port);
	ctl_port_online(port);

	make_dev_args_init(&args);
	args.mda_devsw = &cfi_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = NULL;
	args.mda_si_drv2 = cfi;

	retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "make_dev_s() failed with error %d", retval);
		ctl_port_offline(port);
		ctl_port_deregister(port);
		free(cfi, M_CTL);
		return;
	}

	req->status = CTL_LUN_OK;
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
}

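/*
 * Handle a CTL_REQ_REMOVE request: look up the port by the "port_id"
 * argument, refuse to remove the default ioctl port, and otherwise take
 * the port offline, deregister it and destroy its device node.
 */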
static void
cfi_ioctl_port_remove(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi = NULL;
	const char *val;
	int port_id = -1;

	val = dnvlist_get_string(req->args_nvl, "port_id", NULL);
	if (val != NULL)
		port_id = strtol(val, NULL, 10);

	if (port_id == -1) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "port_id not provided");
		return;
	}

	TAILQ_FOREACH(cfi, &isoftc->ports, link) {
		if (cfi->port.targ_port == port_id)
			break;
	}

	if (cfi == NULL) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot find port %d", port_id);

		return;
	}

	if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot destroy default ioctl port");

		return;
	}

	ctl_port_offline(&cfi->port);
	ctl_port_deregister(&cfi->port);
	TAILQ_REMOVE(&isoftc->ports, cfi, link);
	destroy_dev(cfi->dev);
	free(cfi, M_CTL);
	req->status = CTL_LUN_OK;
}

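/*
 * Frontend ioctl handler; CTL passes CTL_PORT_REQ requests for the "ioctl"
 * driver here so that ports can be created and removed at runtime.
 */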
static int
cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct ctl_req *req;

	if (cmd == CTL_PORT_REQ) {
		req = (struct ctl_req *)addr;
		switch (req->reqtype) {
		case CTL_REQ_CREATE:
			cfi_ioctl_port_create(req);
			break;
		case CTL_REQ_REMOVE:
			cfi_ioctl_port_remove(req);
			break;
		default:
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Unsupported request type %d", req->reqtype);
		}
		return (0);
	}

	return (ENOTTY);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ext_sglist_malloced = 0;
		ctsio->ext_data_filled += ctsio->kern_data_len;
		ctsio->kern_data_resid = 0;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * an S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->ext_sg_entries > 0) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctsio->io_hdr.port_status = 31343;
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		ext_sg_start = ext_sg_entries;
		ext_offset = 0;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist_malloced = 0;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31344;
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31345;
				goto bailout;
			}
		}

		ctsio->ext_data_filled += len_to_copy;
		ctsio->kern_data_resid -= len_to_copy;

		ext_watermark += len_to_copy;
		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		kern_watermark += len_to_copy;
		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

bailout:
	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

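/*
 * Data-move callback: flag the thread sleeping in cfi_submit_wait(), which
 * performs the actual copyin/copyout via ctl_ioctl_do_datamove().
 */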
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

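/*
 * Completion callback: wake up the thread sleeping in cfi_submit_wait().
 */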
static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

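/*
 * Run an I/O through CTL and sleep until it completes, servicing any
 * data-move requests from this (the submitting) thread.
 */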
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	bzero(&params, sizeof(params));
	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen; clean up if ctl_run() fails. */
	if ((retval = ctl_run(io)) != CTL_RETVAL_COMPLETE) {
		mtx_destroy(&params.ioctl_mtx);
		cv_destroy(&params.sem);
		return (retval);
	}

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			ctl_datamove_done(io, false);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

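/*
 * CTL_IO ioctl handler: copy the user-supplied ctl_io into a freshly
 * allocated kernel ctl_io, run it synchronously via cfi_submit_wait() and
 * copy the result back out to the caller.
 */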
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct cfi_port *cfi;
	union ctl_io *io;
	void *pool_tmp, *sc_tmp;
	int retval = 0;

	if (cmd != CTL_IO)
		return (ENOTTY);

	cfi = dev->si_drv2 == NULL
	    ? TAILQ_FIRST(&cfi_softc.ports)
	    : dev->si_drv2;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi->port.ctl_pool_ref);

	/*
	 * Need to save the pool reference so it doesn't get
	 * spammed by the user's ctl_io.
	 */
	pool_tmp = io->io_hdr.pool;
	sc_tmp = CTL_SOFTC(io);
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;
	CTL_SOFTC(io) = sc_tmp;
	TAILQ_INIT(&io->io_hdr.blocked_queue);

	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;

	/*
	 * The user sets the initiator and LUN IDs; the target port is
	 * forced to this frontend's port.
	 */
	io->io_hdr.nexus.targ_port = cfi->port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	if ((io->io_hdr.io_type == CTL_IO_SCSI) &&
	    (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
		io->scsiio.tag_num = cfi->cur_tag_num++;

	retval = cfi_submit_wait(io);
	if (retval == 0)
		memcpy((void *)addr, io, sizeof(*io));

	ctl_free_io(io);
	return (retval);
}

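/*
 * Usage sketch (userland, untested): issuing a TEST UNIT READY through
 * this frontend.  This is only an illustration; the field and constant
 * names are taken from ctl_io.h/scsi_all.h and may differ between FreeBSD
 * versions, and ctladm(8) remains the canonical consumer of CTL_IO.
 *
 *	union ctl_io io;
 *	int fd;
 *
 *	fd = open("/dev/cam/ctl", O_RDWR);
 *	memset(&io, 0, sizeof(io));
 *	io.io_hdr.io_type = CTL_IO_SCSI;
 *	io.io_hdr.nexus.targ_lun = 0;
 *	io.io_hdr.flags = CTL_FLAG_DATA_NONE;
 *	io.scsiio.tag_type = CTL_TAG_SIMPLE;
 *	io.scsiio.cdb_len = 6;
 *	io.scsiio.cdb[0] = 0x00;		(TEST UNIT READY opcode)
 *	io.scsiio.sense_len = SSD_FULL_SIZE;
 *
 *	if (ioctl(fd, CTL_IO, &io) == -1)
 *		err(1, "CTL_IO");
 *	if ((io.io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
 *		warnx("I/O failed, CTL status %#x", io.io_hdr.status);
 */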