/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

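/*
 * The RAM disk keeps its data pages in a tree of indirect pages.  PPP is
 * the number of page pointers that fit into one indirect page, PPPS is
 * log2(PPP), and SGPP is the number of S/G entries that fit into one page,
 * which bounds how much data a single pass can map at once.
 */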
#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */

typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page; try to allocate if none. */
	GP_ANCHOR,	/* Return data page; try to anchor if none. */
	GP_OTHER,	/* Return what is present; do not allocate/anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

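/*
 * Per-LUN state.  "pages" is the root of an "indir"-level tree of indirect
 * pages holding the data pages; "zero_page" is returned for reads of pages
 * that were never written.  cap_bytes/cap_used track the optional capacity
 * limit for thin-provisioned LUNs; cap_bytes of zero means nothing is
 * stored at all (the black hole case).
 */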
struct ctl_be_ramdisk_lun {
	struct ctl_be_lun	cbe_lun;	/* Must be first element. */
	struct ctl_lun_create_params params;
	int			indir;
	uint8_t			**pages;
	uint8_t			*zero_page;
	struct sx		page_lock;
	u_int			pblocksize;
	u_int			pblockmul;
	uint64_t		size_bytes;
	uint64_t		size_blocks;
	uint64_t		cap_bytes;
	uint64_t		cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	SLIST_ENTRY(ctl_be_ramdisk_lun) links;
	struct taskqueue	*io_taskqueue;
	struct task		io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign	queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct sx modify_lock;
	struct mtx lock;
	int num_luns;
	SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	sx_init(&softc->modify_lock, "ctlrammod");
	mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF);
	SLIST_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun;

	mtx_lock(&softc->lock);
	while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->lun_list, links);
		softc->num_luns--;
		/*
		 * Drop our lock here.  Since ctl_remove_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_remove_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	sx_destroy(&softc->modify_lock);
	return (0);
}

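/*
 * Look up the backing page for page number "pn".  For GP_WRITE/GP_ANCHOR
 * the indirect pages on the way down are allocated as needed, and the data
 * page itself is allocated or anchored if the capacity limit allows it.
 * For GP_READ a hole reads as the shared zero page; GP_OTHER returns the
 * P_UNMAPPED/P_ANCHORED markers as-is.  With no capacity configured
 * (cap_bytes == 0) nothing is stored and a single scratch page backs all
 * writes.
 */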
static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
}

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
}

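/*
 * Byte-compare two buffers; returns the offset of the first mismatch, or
 * "size" if the buffers are identical.
 */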
static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}

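/*
 * Datamove completion callback.  Frees a temporary S/G list if one was
 * allocated, runs the COMPARE check when requested, and either requeues
 * the I/O to the worker thread if more data remains or completes it.
 */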
static int
ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr)
{
	struct ctl_be_ramdisk_lun *be_lun =
	    (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io);

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	ctl_datamove(io);
}

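/*
 * Handle one chunk of a READ/WRITE request.  The backing pages are mapped
 * directly into the I/O as the data buffer (as an S/G list when the chunk
 * spans several pages), so no copy is made; the request continues from
 * ctl_backend_ramdisk_move_done() until all LBAs have been transferred.
 */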
static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		ctl_serseq_done(io);
	}
	ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

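/*
 * GET LBA STATUS: report whether the backing page of the requested LBA is
 * mapped (status 0), deallocated (1), or anchored (2).
 */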
static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}

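/*
 * Deallocate (or anchor) a range of LBAs: the partial pages at both ends
 * of the range are zeroed in place, and every whole page in between is
 * unmapped or anchored.
 */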
static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

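/*
 * WRITE SAME handler.  SWS_UNMAP/SWS_ANCHOR ranges are handed to the
 * delete path; otherwise each block is filled with zeroes (SWS_NDOB) or
 * with the supplied data block, with the LBA stamped into the block when
 * SWS_LBDATA is set.
 */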
static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 0,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static uint64_t
ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint64_t		val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

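/*
 * Remove a LUN in response to a CTL_LUNREQ_RM request: unlink it from the
 * backend's list, ask CTL to remove it, and wait for the LUN shutdown
 * callback to mark it unconfigured before freeing it.
 */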
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id) {
			SLIST_REMOVE(&softc->lun_list, be_lun,
			    ctl_be_ramdisk_lun, links);
			softc->num_luns--;
			break;
		}
	}
	mtx_unlock(&softc->lock);
	sx_xunlock(&softc->modify_lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_remove_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_remove_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		mtx_unlock(&softc->lock);
		free(be_lun, M_RAMDISK);
	} else {
		mtx_unlock(&softc->lock);
		return (EINTR);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

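/*
 * Create a new ramdisk/black hole LUN: parse the create parameters and
 * nvlist options (pblocksize, capacity, unmap, readonly, serseq, ha_role,
 * serial/device IDs), size the page tree, allocate the zero page (and a
 * single scratch page when no capacity is configured), start the per-LUN
 * worker taskqueue, and register the LUN with CTL.
 */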
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	const char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->options = nvlist_clone(req->args_nvl);
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = 0;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: unsupported pblocksize %u", __func__,
			 be_lun->pblocksize);
		goto bailout_error;
	}

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: pblocksize %u not exp2 of blocksize %u",
				 __func__,
				 be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = dnvlist_get_string(cbe_lun->options, "capacity", NULL);
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
			 softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
			MIN(sizeof(cbe_lun->serial_num),
			    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
			MIN(sizeof(cbe_lun->device_id),
			    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "ctlram page");
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PUSER,
					 /*proc*/control_softc->ctl_proc,
					 /*thread name*/"ramdisk");
	if (retval != 0)
		goto bailout_error;

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	params->req_lun_id = cbe_lun->lun_id;

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

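/*
 * Handle a CTL_LUNREQ_MODIFY request: re-clone the option nvlist, update
 * the HA role, and resize the LUN from the (possibly new) size in bytes.
 */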
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	const char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	if (req->args_nvl != NULL) {
		nvlist_destroy(cbe_lun->options);
		cbe_lun->options = nvlist_clone(req->args_nvl);
	}

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			"%s: LUN size %ju < blocksize %u", __func__,
			be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_ERROR;
	return (0);
}

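/*
 * LUN shutdown callback from CTL: drain and free the worker taskqueue,
 * release all pages, and either wake a waiting removal request or free
 * the LUN structure outright.
 */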
static void
ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_be_ramdisk_softc *softc = be_lun->softc;

	taskqueue_drain_all(be_lun->io_taskqueue);
	taskqueue_free(be_lun->io_taskqueue);
	nvlist_destroy(be_lun->cbe_lun.options);
	free(be_lun->zero_page, M_RAMDISK);
	ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
	sx_destroy(&be_lun->page_lock);
	mtx_destroy(&be_lun->queue_lock);

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
		wakeup(be_lun);
	else
		free(be_lun, M_RAMDISK);
	mtx_unlock(&softc->lock);
}