/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/cam/ctl/ctl_backend_block.c 246427 2013-02-06 18:32:12Z mav $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

/*
 * The idea here is that we'll allocate enough S/G space to hold a 16MB
 * I/O.  If we get an I/O larger than that, we'll reject it.
 */
#define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
#define	CTLBLK_MAX_SEGS		((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
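
/*
 * Worked example (illustrative, assuming the common 128KB MAXPHYS): a
 * full 16MB I/O needs 16MB / 128KB = 128 segments.  Since the integer
 * division above truncates, the + 1 turns the result into a safe upper
 * bound on the segment count for any I/O up to the maximum (the macro
 * yields 129 entries).
 */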

#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
    printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif
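
/*
 * DPRINTF() tracing is compiled in by defining CTLBLK_DEBUG at build
 * time, e.g. CFLAGS+=-DCTLBLK_DEBUG in the module Makefile (an
 * illustrative build tweak; there does not appear to be a kernel
 * config option wired up for it here).
 */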

SDT_PROVIDER_DEFINE(cbb);

typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
} ctl_be_block_lun_flags;

typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
	struct cdev *cdev;
	struct cdevsw *csw;
	int dev_ref;
};

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_devdata dev;
	struct ctl_be_block_filedata file;
};

struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
			       struct ctl_be_block_io *beio);

/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_block_disk *disk;
	char lunname[32];
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	struct mtx lock;
	uma_zone_t lun_zone;
	uint64_t size_blocks;
	uint64_t size_bytes;
	uint32_t blocksize;
	int blocksize_shift;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	STAILQ_ENTRY(ctl_be_block_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
};

/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	STAILQ_HEAD(, ctl_be_block_io)   beio_free_queue;
	struct mtx			 lock;
	int				 prealloc_beio;
	int				 num_disks;
	STAILQ_HEAD(, ctl_block_disk)	 disk_list;
	int				 num_luns;
	STAILQ_HEAD(, ctl_be_block_lun)	 lun_list;
};

static struct ctl_be_block_softc backend_block_softc;

/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io			*io;
	struct ctl_sg_entry		sg_segs[CTLBLK_MAX_SEGS];
	struct iovec			xiovecs[CTLBLK_MAX_SEGS];
	int				bio_cmd;
	int				bio_flags;
	int				num_segs;
	int				num_bios_sent;
	int				num_bios_done;
	int				send_complete;
	int				num_errors;
	struct bintime			ds_t0;
	devstat_tag_type		ds_tag_type;
	devstat_trans_flags		ds_trans_type;
	uint64_t			io_len;
	uint64_t			io_offset;
	struct ctl_be_block_softc	*softc;
	struct ctl_be_block_lun		*lun;
	STAILQ_ENTRY(ctl_be_block_io)	links;
};

static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
	    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
           &cbb_num_threads, 0, "Number of threads per backing file");
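
/*
 * Usage sketch: since the knob above is both a loader tunable and a
 * read-write sysctl, the default thread count can be seeded at boot in
 * loader.conf(5):
 *
 *	kern.cam.ctl.block.num_threads="8"
 *
 * or changed at runtime:
 *
 *	sysctl kern.cam.ctl.block.num_threads=8
 *
 * The value is latched per LUN at creation time, so a runtime change
 * only affects LUNs created afterward.
 */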

static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
#if 0
static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
#endif
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
				       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
				      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
				    union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
				  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
				   int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
			     struct ctl_be_block_lun *be_lun,
			     struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
					   ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);

static struct ctl_backend_driver ctl_be_block_driver =
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.data_submit = ctl_be_block_submit,
	.data_move_done = ctl_be_block_move_done,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info
};

MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);

static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;
	int count;

	mtx_lock(&softc->lock);

	beio = STAILQ_FIRST(&softc->beio_free_queue);
	if (beio != NULL) {
		STAILQ_REMOVE(&softc->beio_free_queue, beio,
			      ctl_be_block_io, links);
	}
	mtx_unlock(&softc->lock);

	if (beio != NULL) {
		bzero(beio, sizeof(*beio));
		beio->softc = softc;
		return (beio);
	}

	for (;;) {
		count = ctl_grow_beio(softc, /*count*/ 10);

		/*
		 * This shouldn't be possible, since ctl_grow_beio() uses a
		 * blocking malloc.
		 */
		if (count == 0)
			return (NULL);

		/*
		 * Since we have to drop the lock while we're allocating beio
		 * structures, it's possible someone else can come along and
		 * grab the beios we've just allocated.
		 */
		mtx_lock(&softc->lock);
		beio = STAILQ_FIRST(&softc->beio_free_queue);
		if (beio != NULL) {
			STAILQ_REMOVE(&softc->beio_free_queue, beio,
				      ctl_be_block_io, links);
		}
		mtx_unlock(&softc->lock);

		if (beio != NULL) {
			bzero(beio, sizeof(*beio));
			beio->softc = softc;
			break;
		}
	}
	return (beio);
}

static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
	struct ctl_be_block_softc *softc;
	int duplicate_free;
	int i;

	softc = beio->softc;
	duplicate_free = 0;

	for (i = 0; i < beio->num_segs; i++) {
		if (beio->sg_segs[i].addr == NULL)
			duplicate_free++;

		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
		beio->sg_segs[i].addr = NULL;
	}

	if (duplicate_free > 0) {
		printf("%s: %d duplicate frees out of %d segments\n", __func__,
		       duplicate_free, beio->num_segs);
	}
	mtx_lock(&softc->lock);
	STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
	mtx_unlock(&softc->lock);
}

static int
ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct ctl_be_block_io *beio;

		beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
							   M_CTLBLK,
							   M_WAITOK | M_ZERO);
		beio->softc = softc;
		mtx_lock(&softc->lock);
		STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
		mtx_unlock(&softc->lock);
	}

	return (i);
}

#if 0
static void
ctl_shrink_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio, *beio_tmp;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
		STAILQ_REMOVE(&softc->beio_free_queue, beio,
			      ctl_be_block_io, links);
		free(beio, M_CTLBLK);
	}
	mtx_unlock(&softc->lock);
}
#endif

static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	int io_len;

	io = beio->io;

	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
		io_len = beio->io_len;
	else
		io_len = 0;

	devstat_end_transaction(beio->lun->disk_stats,
				/*bytes*/ io_len,
				beio->ds_tag_type,
				beio->ds_trans_type,
				/*now*/ NULL,
				/*then*/&beio->ds_t0);

	ctl_free_beio(beio);
	ctl_done(io);
}

static int
ctl_be_block_move_done(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	beio = (struct ctl_be_block_io *)
		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

	be_lun = beio->lun;

	DPRINTF("entered\n");

#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	/*
	 * We set status at this point for read commands, and write
	 * commands with errors.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 && (io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		ctl_set_success(&io->scsiio);
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed
	 * successfully.  We now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (0);
}

static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;

	beio = bio->bio_caller1;
	be_lun = beio->lun;
	io = beio->io;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	if (bio->bio_error != 0)
		beio->num_errors++;

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	mtx_unlock(&be_lun->lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	if (beio->num_errors > 0) {
		if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a flush, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct mount *mountpoint;
	int vfs_is_locked, error, lock_flags;

	DPRINTF("entered\n");

	io = beio->io;

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}

SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, file_done, "uint64_t");

static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int vfs_is_locked, flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = beio->bio_flags;

	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
	}

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ)
		xuio.uio_rw = UIO_READ;
	else
		xuio.uio_rw = UIO_WRITE;

	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into the cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 *
		 * So if we've got the BIO_ORDERED flag set, we want
		 * IO_SYNC in either the UFS or ZFS case.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				  IO_SYNC : 0, file_data->cred);
		VOP_UNLOCK(be_lun->vn, 0);

		vn_finished_write(mountpoint);
	}
	VFS_UNLOCK_GIANT(vfs_is_locked);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		char path_str[32];

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		/*
		 * XXX KDM ZFS returns ENOSPC when the underlying
		 * filesystem fills up.  What kind of SCSI error should we
		 * return for that?
		 */
		printf("%s%s command returned errno %d\n", path_str,
		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if (beio->bio_cmd == BIO_WRITE) {
		ctl_set_success(&io->scsiio);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
		ctl_complete_beio(beio);
	} else {
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	struct bio *bio;
	union ctl_io *io;
	struct ctl_be_block_devdata *dev_data;

	dev_data = &be_lun->backend.dev;
	io = beio->io;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd	    = BIO_FLUSH;
	bio->bio_flags	   |= BIO_ORDERED;
	bio->bio_dev	    = dev_data->cdev;
	bio->bio_offset	    = 0;
	bio->bio_data	    = 0;
	bio->bio_done	    = ctl_be_block_biodone;
	bio->bio_caller1    = beio;
	bio->bio_pblkno	    = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	(*dev_data->csw->d_strategy)(bio);
}

static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
			  struct ctl_be_block_io *beio)
{
	int i;
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	off_t cur_offset;
	int max_iosize;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
	 * set it properly, use DFLTPHYS.
	 */
	max_iosize = dev_data->cdev->si_iosize_max;
	if (max_iosize < PAGE_SIZE)
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;

	/*
	 * XXX KDM need to accurately reflect the number of I/Os outstanding
	 * to a device.
	 */
	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_flags |= beio->bio_flags;
			bio->bio_dev = dev_data->cdev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			/*
			 * Make sure we set the complete bit just before we
			 * issue the last bio so we don't wind up with a
			 * race.
			 *
			 * Use the LUN mutex here instead of a combination
			 * of atomic variables for simplicity.
			 *
			 * XXX KDM we could have a per-IO lock, but that
			 * would cause additional per-IO setup and teardown
			 * overhead.  Hopefully there won't be too much
			 * contention on the LUN lock.
			 */
			mtx_lock(&be_lun->lock);

			beio->num_bios_sent++;

			if ((i == beio->num_segs - 1)
			 && (cur_size == 0))
				beio->send_complete = 1;

			mtx_unlock(&be_lun->lock);

			(*dev_data->csw->d_strategy)(bio);
		}
	}
}
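
/*
 * Splitting example (illustrative): if the backing device advertises a
 * si_iosize_max of 64KB while an S/G segment is 128KB (MAXPHYS), that
 * segment goes down as two consecutive 64KB bios; send_complete is
 * set, under the LUN lock, only when the last bio of the last segment
 * is about to be issued.
 */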

static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
			 union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	if (beio == NULL) {
		/*
		 * This should not happen.  ctl_alloc_beio() will call
		 * ctl_grow_beio() with a blocking malloc as needed.
		 * A malloc with M_WAITOK should not fail.
		 */
		ctl_set_busy(&io->scsiio);
		ctl_done(io);
		return;
	}

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		be_lun->lun_flush(be_lun, beio);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}

SDT_PROBE_DEFINE1(cbb, kernel, read, start, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, alloc_done, "uint64_t");

static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
			   union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len lbalen;
	uint64_t len_left, io_size_bytes;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
	}

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	io_size_bytes = lbalen.len * be_lun->blocksize;

	/*
	 * XXX KDM this is temporary, until we implement chaining of beio
	 * structures and multiple datamove calls to move all the data in
	 * or out.
	 */
	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
		printf("%s: IO length %ju > max io size %u\n", __func__,
		       (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 1,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done(io);
		return;
	}

	beio = ctl_alloc_beio(softc);
	if (beio == NULL) {
		/*
		 * This should not happen.  ctl_alloc_beio() will call
		 * ctl_grow_beio() with a blocking malloc as needed.
		 * A malloc with M_WAITOK should not fail.
		 */
		ctl_set_busy(&io->scsiio);
		ctl_done(io);
		return;
	}

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	/*
	 * If the I/O came down with an ordered or head of queue tag, set
	 * the BIO_ORDERED attribute.  For head of queue tags, that's
	 * pretty much the best we can do.
	 *
	 * XXX KDM we don't have a great way to easily know about the FUA
	 * bit right now (it is decoded in ctl_read_write(), but we don't
	 * pass that knowledge to the backend), and in any case we would
	 * need to determine how to handle it.
	 */
	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
		beio->bio_flags = BIO_ORDERED;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	case CTL_TAG_ACA:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	/*
	 * This path handles read and write only.  The config write path
	 * handles flush operations.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	} else {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	}

	beio->io_len = lbalen.len * be_lun->blocksize;
	beio->io_offset = lbalen.lba * be_lun->blocksize;

	DPRINTF("%s at LBA %jx len %u\n",
	       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
	       (uintmax_t)lbalen.lba, lbalen.len);

	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
	     len_left > 0; i++) {
		/*
		 * Setup the S/G entry for this chunk.
		 */
		beio->sg_segs[i].len = min(MAXPHYS, len_left);
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}

	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_block_softc *softc;
	union ctl_io *io;

	be_lun = (struct ctl_be_block_lun *)context;
	softc = be_lun->softc;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			struct ctl_be_block_io *beio;

			DPRINTF("datamove queue\n");

			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			beio = (struct ctl_be_block_io *)
			    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

			be_lun->dispatch(be_lun, beio);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {
			DPRINTF("config write queue\n");

			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			ctl_be_block_cw_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");

			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->lock);

			/*
			 * We must drop the lock, since this routine and
			 * its children may sleep.
			 */
			ctl_be_block_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->lock);
}
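
/*
 * Ordering note: the worker loop drains the datamove queue first, then
 * config writes, then new input, which has the effect that I/Os already
 * in flight make progress before new commands are started.
 */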

/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	DPRINTF("entered\n");

	retval = CTL_RETVAL_COMPLETE;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	/*
	 * Make sure we only get SCSI I/O.
	 */
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
		"%#x) encountered", io->io_hdr.io_type));

	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (retval);
}

static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc;
	int error;

	softc = &backend_block_softc;

	error = 0;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	struct vattr		      vattr;
	int			      error;

	error = 0;
	file_data = &be_lun->backend.file;
	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error calling VOP_GETATTR() for file %s",
			 be_lun->dev_path);
		return (error);
	}

	/*
	 * Verify that we have the ability to upgrade to exclusive
	 * access on this file so we can trap errors at open instead
	 * of reporting them during first access.
	 */
	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
		if (be_lun->vn->v_iflag & VI_DOOMED) {
			error = EBADF;
			snprintf(req->error_str, sizeof(req->error_str),
				 "error locking file %s", be_lun->dev_path);
			return (error);
		}
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;
	/*
	 * We set the multi thread flag for file operations because all
	 * filesystems (in theory) are capable of allowing multiple readers
	 * of a file at once.  So we want to get the maximum possible
	 * concurrency.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

	/*
	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
	 * may not work with other OSes as well.  So just export a sector
	 * size of 512 bytes, which should work with any OS or
	 * application.  Since our backing is a file, any block size will
	 * work fine for the backing store.
	 */
#if 0
	be_lun->blocksize = vattr.va_blocksize;
#endif
	if (params->blocksize_bytes != 0)
		be_lun->blocksize = params->blocksize_bytes;
	else
		be_lun->blocksize = 512;

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < be_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
	}
	return (error);
}

static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_lun_create_params *params;
	struct vattr		      vattr;
	struct cdev		     *dev;
	struct cdevsw		     *devsw;
	int			      error;

	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	be_lun->dispatch = ctl_be_block_dispatch_dev;
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
					     &be_lun->backend.dev.dev_ref);
	if (be_lun->backend.dev.csw == NULL)
		panic("Unable to retrieve device switch");

	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error getting vnode attributes for device %s",
			 __func__, be_lun->dev_path);
		return (error);
	}

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
			       (caddr_t)&be_lun->blocksize, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
	if ((params->blocksize_bytes != 0)
	 && (params->blocksize_bytes > be_lun->blocksize)) {
		uint32_t bs_multiple, tmp_blocksize;

		bs_multiple = params->blocksize_bytes / be_lun->blocksize;

		tmp_blocksize = bs_multiple * be_lun->blocksize;

		if (tmp_blocksize == params->blocksize_bytes) {
			be_lun->blocksize = params->blocksize_bytes;
		} else {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 __func__, params->blocksize_bytes,
				 be_lun->blocksize);
			return (EINVAL);
		}
	} else if ((params->blocksize_bytes != 0)
		&& (params->blocksize_bytes != be_lun->blocksize)) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: requested blocksize %u < backing device "
			 "blocksize %u", __func__, params->blocksize_bytes,
			 be_lun->blocksize);
		return (EINVAL);
	}
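
	/*
	 * Example (illustrative): on a 512-byte-sector backing device, a
	 * requested blocksize of 4096 passes the check above since
	 * 4096 / 512 = 8 and 8 * 512 == 4096, while 520 is rejected:
	 * 520 / 512 truncates to 1, and 1 * 512 != 520.
	 */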

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&be_lun->size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE "
			 "ioctl on %s!", __func__, error,
			 be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > be_lun->size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)be_lun->size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	}

	return (0);
}

static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	DROP_GIANT();
	if (be_lun->vn) {
		int flags = FREAD | FWRITE;
		int vfs_is_locked = 0;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			if (be_lun->backend.dev.csw) {
				dev_relthread(be_lun->backend.dev.cdev,
					      be_lun->backend.dev.dev_ref);
				be_lun->backend.dev.csw  = NULL;
				be_lun->backend.dev.cdev = NULL;
			}
			break;
		case CTL_BE_BLOCK_FILE:
			vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
			break;
		case CTL_BE_BLOCK_NONE:
		default:
			panic("Unexpected backend type.");
			break;
		}

		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			VFS_UNLOCK_GIANT(vfs_is_locked);
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
		default:
			panic("Unexpected backend type.");
			break;
		}
	}
	PICKUP_GIANT();

	return (0);
}

static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
		       struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct nameidata nd;
	int		 flags;
	int		 error;
	int		 vfs_is_locked;

	/*
	 * XXX KDM allow a read-only option?
	 */
	flags = FREAD | FWRITE;
	error = 0;

	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Root filesystem is not mounted", __func__);
		return (1);
	}

	if (!curthread->td_proc->p_fd->fd_cdir) {
		curthread->td_proc->p_fd->fd_cdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_rdir) {
		curthread->td_proc->p_fd->fd_rdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_jdir) {
		curthread->td_proc->p_fd->fd_jdir = rootvnode;
		VREF(rootvnode);
	}
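
	/*
	 * The assignments above give the calling thread current, root and
	 * jail directories when it has none, so the namei() lookup done by
	 * vn_open() below has a directory to resolve the LUN path against.
	 */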

 again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error) {
		/*
		 * This is the only reasonable guess we can make as far as
		 * the path goes if the user doesn't give us a fully
		 * qualified path: assume the name lives under /dev (e.g.
		 * "da0" becomes "/dev/da0").  If they want a regular file,
		 * they need to specify the full path.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_path = "/dev/";
			char *dev_name;

			/* Try adding device path at beginning of name */
			dev_name = malloc(strlen(be_lun->dev_path)
					+ strlen(dev_path) + 1,
					  M_CTLBLK, M_WAITOK);
			if (dev_name) {
				sprintf(dev_name, "%s%s", dev_path,
					be_lun->dev_path);
				free(be_lun->dev_path, M_CTLBLK);
				be_lun->dev_path = dev_name;
				goto again;
			}
		}
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error opening %s", __func__, be_lun->dev_path);
		return (error);
	}

	vfs_is_locked = NDHASGIANT(&nd);

	NDFREE(&nd, NDF_ONLY_PNBUF);

	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn, 0);
	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error != 0) {
		ctl_be_block_close(be_lun);
		return (error);
	}

	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	return (0);
}
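
/*
 * Shift example: with the common 512-byte blocksize, fls(512) - 1 == 9,
 * so size_blocks is size_bytes >> 9.  Note the shift is only exact when
 * the blocksize is a power of two.
 */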

static int
ctl_be_block_mem_ctor(void *mem, int size, void *arg, int flags)
{
	return (0);
}

static void
ctl_be_block_mem_dtor(void *mem, int size, void *arg)
{
	bzero(mem, size);
}
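
/*
 * The zone destructor above zeroes each buffer as it returns to the
 * LUN zone, presumably so a stale buffer can never leak a previous
 * initiator's data; the constructor intentionally does nothing.
 */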

static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	struct ctl_be_arg *file_arg;
	char tmpstr[32];
	int retval, num_threads;
	int i;

	params = &req->reqdata.create;
	retval = 0;

	num_threads = cbb_num_threads;

	file_arg = NULL;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
	    ctl_be_block_mem_ctor, ctl_be_block_mem_dtor, NULL, NULL,
	    /*align*/ 0, /*flags*/0);

	if (be_lun->lun_zone == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating UMA zone", __func__);
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
		for (i = 0; i < req->num_be_args; i++) {
			if (strcmp(req->kern_be_args[i].name, "file") == 0) {
				file_arg = &req->kern_be_args[i];
				break;
			}
		}

		if (file_arg == NULL) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: no file argument specified", __func__);
			goto bailout_error;
		}

		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
					  M_WAITOK | M_ZERO);

		strlcpy(be_lun->dev_path, (char *)file_arg->value,
			file_arg->vallen);

		retval = ctl_be_block_open(softc, be_lun, req);
		if (retval != 0) {
			retval = 0;
			goto bailout_error;
		}

		/*
		 * Tell the user the size of the file/device.
		 */
		params->lun_size_bytes = be_lun->size_bytes;

		/*
		 * The maximum LBA is the size - 1.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		/*
		 * For processor devices, we don't have any size.
		 */
		be_lun->blocksize = 0;
		be_lun->size_blocks = 0;
		be_lun->size_bytes = 0;
		be_lun->ctl_be_lun.maxlba = 0;
		params->lun_size_bytes = 0;

		/*
		 * Default to just 1 thread for processor devices.
		 */
		num_threads = 1;
	}

	/*
	 * XXX This searching loop might be refactored to be combined with
	 * the loop above.
1727	 */
1728	for (i = 0; i < req->num_be_args; i++) {
1729		if (strcmp(req->kern_be_args[i].name, "num_threads") == 0) {
1730			struct ctl_be_arg *thread_arg;
1731			char num_thread_str[16];
1732			int tmp_num_threads;
1733
1734
1735			thread_arg = &req->kern_be_args[i];
1736
1737			strlcpy(num_thread_str, (char *)thread_arg->value,
1738				min(thread_arg->vallen,
1739				sizeof(num_thread_str)));
1740
1741			tmp_num_threads = strtol(num_thread_str, NULL, 0);
1742
1743			/*
1744			 * We don't let the user specify less than one
1745			 * thread, but hope he's clueful enough not to
1746			 * specify 1000 threads.
1747			 */
1748			if (tmp_num_threads < 1) {
1749				snprintf(req->error_str, sizeof(req->error_str),
1750					 "%s: invalid number of threads %s",
1751				         __func__, num_thread_str);
1752				goto bailout_error;
1753			}
1754
1755			num_threads = tmp_num_threads;
1756		}
1757	}
1758
1759	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
1760	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
1761	be_lun->ctl_be_lun.be_lun = be_lun;
1762	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
1763	/* Tell the user the blocksize we ended up using */
1764	params->blocksize_bytes = be_lun->blocksize;
1765	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
1766		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
1767		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
1768	} else
1769		be_lun->ctl_be_lun.req_lun_id = 0;
1770
1771	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
1772	be_lun->ctl_be_lun.lun_config_status =
1773		ctl_be_block_lun_config_status;
1774	be_lun->ctl_be_lun.be = &ctl_be_block_driver;
1775
1776	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
1777		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
1778			 softc->num_luns);
1779		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
1780			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1781			sizeof(tmpstr)));
1782
1783		/* Tell the user what we used for a serial number */
1784		strncpy((char *)params->serial_num, tmpstr,
1785			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
1786	} else {
1787		strncpy((char *)be_lun->ctl_be_lun.serial_num,
1788			params->serial_num,
1789			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1790			sizeof(params->serial_num)));
1791	}
1792	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
1793		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
1794		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
1795			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1796			sizeof(tmpstr)));
1797
1798		/* Tell the user what we used for a device ID */
1799		strncpy((char *)params->device_id, tmpstr,
1800			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
1801	} else {
1802		strncpy((char *)be_lun->ctl_be_lun.device_id,
1803			params->device_id,
1804			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1805				sizeof(params->device_id)));
1806	}
1807
1808	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
1809
1810	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
1811	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
1812
1813	if (be_lun->io_taskqueue == NULL) {
1814		snprintf(req->error_str, sizeof(req->error_str),
1815			 "%s: Unable to create taskqueue", __func__);
1816		goto bailout_error;
1817	}
1818
1819	/*
1820	 * Note that we start the same number of threads by default for
1821	 * both the file case and the block device case.  For the file
1822	 * case, we need multiple threads to allow concurrency, because the
1823	 * vnode interface is designed to be a blocking interface.  For the
1824	 * block device case, ZFS zvols at least will block the caller's
1825	 * context in many instances, and so we need multiple threads to
1826	 * overcome that problem.  Other block devices don't need as many
1827	 * threads, but they shouldn't cause too many problems.
1828	 *
1829	 * If the user wants to just have a single thread for a block
1830	 * device, he can specify that when the LUN is created, or change
1831	 * the tunable/sysctl to alter the default number of threads.
1832	 */
1833	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
1834					 /*num threads*/num_threads,
1835					 /*priority*/PWAIT,
1836					 /*thread name*/
1837					 "%s taskq", be_lun->lunname);
1838
1839	if (retval != 0)
1840		goto bailout_error;
1841
1842	be_lun->num_threads = num_threads;
1843
1844	mtx_lock(&softc->lock);
1845	softc->num_luns++;
1846	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
1847
1848	mtx_unlock(&softc->lock);
1849
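	/*
	 * Register the LUN with the CTL mid-layer.  On failure, undo the
	 * list insertion above.  retval is deliberately reset to 0: the
	 * error is reported to the caller through req->status and
	 * req->error_str, not through the return value.
	 */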
1850	retval = ctl_add_lun(&be_lun->ctl_be_lun);
1851	if (retval != 0) {
1852		mtx_lock(&softc->lock);
1853		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
1854			      links);
1855		softc->num_luns--;
1856		mtx_unlock(&softc->lock);
1857		snprintf(req->error_str, sizeof(req->error_str),
1858			 "%s: ctl_add_lun() returned error %d, see dmesg for "
1859		 "details", __func__, retval);
1860		retval = 0;
1861		goto bailout_error;
1862	}
1863
1864	mtx_lock(&softc->lock);
1865
1866	/*
1867	 * Tell the config_status routine that we're waiting so it won't
1868	 * clean up the LUN in the event of an error.
1869	 */
1870	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
1871
1872	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
1873		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
1874		if (retval == EINTR)
1875			break;
1876	}
1877	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
1878
1879	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
1880		snprintf(req->error_str, sizeof(req->error_str),
1881			 "%s: LUN configuration error, see dmesg for details",
1882			 __func__);
1883		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
1884			      links);
1885		softc->num_luns--;
1886		mtx_unlock(&softc->lock);
1887		goto bailout_error;
1888	} else {
1889		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
1890	}
1891
1892	mtx_unlock(&softc->lock);
1893
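	/*
	 * Register a devstat entry under the "cbb" name so that per-LUN
	 * I/O statistics are visible to devstat(9) consumers such as
	 * iostat(8).
	 */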
1894	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
1895					       be_lun->blocksize,
1896					       DEVSTAT_ALL_SUPPORTED,
1897					       be_lun->ctl_be_lun.lun_type
1898					       | DEVSTAT_TYPE_IF_OTHER,
1899					       DEVSTAT_PRIORITY_OTHER);
1900
1901
1902	req->status = CTL_LUN_OK;
1903
1904	return (retval);
1905
1906bailout_error:
1907	req->status = CTL_LUN_ERROR;
1908
1909	ctl_be_block_close(be_lun);
1910
1911	free(be_lun->dev_path, M_CTLBLK);
1912	free(be_lun, M_CTLBLK);
1913
1914	return (retval);
1915}
1916
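/*
 * Remove a LUN: look it up by ID, disable and invalidate it in the CTL
 * mid-layer, wait for the shutdown callback to mark it unconfigured, and
 * then tear down the taskqueue and backing store.  Userland typically
 * reaches this through ctladm(8); a plausible invocation (assuming the
 * stock block backend name) would be:
 *
 *	ctladm remove -b block -l <lun_id>
 */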
1917static int
1918ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
1919{
1920	struct ctl_lun_rm_params *params;
1921	struct ctl_be_block_lun *be_lun;
1922	int retval;
1923
1924	params = &req->reqdata.rm;
1925
1926	mtx_lock(&softc->lock);
1927
1928	be_lun = NULL;
1929
1930	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
1931		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
1932			break;
1933	}
1934	mtx_unlock(&softc->lock);
1935
1936	if (be_lun == NULL) {
1937		snprintf(req->error_str, sizeof(req->error_str),
1938			 "%s: LUN %u is not managed by the block backend",
1939			 __func__, params->lun_id);
1940		goto bailout_error;
1941	}
1942
1943	retval = ctl_disable_lun(&be_lun->ctl_be_lun);
1944
1945	if (retval != 0) {
1946		snprintf(req->error_str, sizeof(req->error_str),
1947			 "%s: error %d returned from ctl_disable_lun() for "
1948			 "LUN %u", __func__, retval, params->lun_id);
1949		goto bailout_error;
1950
1951	}
1952
1953	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
1954	if (retval != 0) {
1955		snprintf(req->error_str, sizeof(req->error_str),
1956			 "%s: error %d returned from ctl_invalidate_lun() for "
1957			 "LUN %u", __func__, retval, params->lun_id);
1958		goto bailout_error;
1959	}
1960
1961	mtx_lock(&softc->lock);
1962
1963	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
1964
1965	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
1966		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
1967		if (retval == EINTR)
1968			break;
1969	}
1970
1971	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
1972
1973	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
1974		snprintf(req->error_str, sizeof(req->error_str),
1975			 "%s: interrupted waiting for LUN to be freed",
1976			 __func__);
1977		mtx_unlock(&softc->lock);
1978		goto bailout_error;
1979	}
1980
1981	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);
1982
1983	softc->num_luns--;
1984	mtx_unlock(&softc->lock);
1985
1986	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
1987
1988	taskqueue_free(be_lun->io_taskqueue);
1989
1990	ctl_be_block_close(be_lun);
1991
1992	if (be_lun->disk_stats != NULL)
1993		devstat_remove_entry(be_lun->disk_stats);
1994
1995	uma_zdestroy(be_lun->lun_zone);
1996
1997	free(be_lun->dev_path, M_CTLBLK);
1998
1999	free(be_lun, M_CTLBLK);
2000
2001	req->status = CTL_LUN_OK;
2002
2003	return (0);
2004
2005bailout_error:
2006
2007	req->status = CTL_LUN_ERROR;
2008
2009	return (0);
2010}
2011
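/*
 * Recompute the size of a file-backed LUN.  An explicitly requested size
 * is taken at face value (note that, unlike the device case below, it is
 * not checked against the size of the backing file); otherwise the
 * current file size is obtained with VOP_GETATTR().  The caller holds
 * the vnode lock across this call.
 */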
2012static int
2013ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
2014			 struct ctl_lun_req *req)
2015{
2016	struct vattr vattr;
2017	int error;
2018	struct ctl_lun_modify_params *params;
2019
2020	params = &req->reqdata.modify;
2021
2022	if (params->lun_size_bytes != 0) {
2023		be_lun->size_bytes = params->lun_size_bytes;
2024	} else  {
2025		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
2026		if (error != 0) {
2027			snprintf(req->error_str, sizeof(req->error_str),
2028				 "%s: error calling VOP_GETATTR() for file %s",
2029				 __func__, be_lun->dev_path);
2030			return (error);
2031		}
2032
2033		be_lun->size_bytes = vattr.va_size;
2034	}
2035
2036	return (0);
2037}
2038
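/*
 * Recompute the size of a device-backed LUN.  The media size is queried
 * with the DIOCGMEDIASIZE ioctl, and an explicitly requested LUN size is
 * honored only if it fits within the backing device.
 */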
2039static int
2040ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
2041			struct ctl_lun_req *req)
2042{
2043	struct cdev *dev;
2044	struct cdevsw *devsw;
2045	int error;
2046	struct ctl_lun_modify_params *params;
2047	uint64_t size_bytes;
2048
2049	params = &req->reqdata.modify;
2050
2051	dev = be_lun->vn->v_rdev;
2052	devsw = dev->si_devsw;
2053	if (!devsw->d_ioctl) {
2054		snprintf(req->error_str, sizeof(req->error_str),
2055			 "%s: no d_ioctl for device %s!", __func__,
2056			 be_lun->dev_path);
2057		return (ENODEV);
2058	}
2059
2060	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
2061			       (caddr_t)&size_bytes, FREAD,
2062			       curthread);
2063	if (error) {
2064		snprintf(req->error_str, sizeof(req->error_str),
2065			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
2066			 "on %s!", __func__, error, be_lun->dev_path);
2067		return (error);
2068	}
2069
2070	if (params->lun_size_bytes != 0) {
2071		if (params->lun_size_bytes > size_bytes) {
2072			snprintf(req->error_str, sizeof(req->error_str),
2073				 "%s: requested LUN size %ju > backing device "
2074				 "size %ju", __func__,
2075				 (uintmax_t)params->lun_size_bytes,
2076				 (uintmax_t)size_bytes);
2077			return (EINVAL);
2078		}
2079
2080		be_lun->size_bytes = params->lun_size_bytes;
2081	} else {
2082		be_lun->size_bytes = size_bytes;
2083	}
2084
2085	return (0);
2086}
2087
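/*
 * Handle a LUN resize request: look the LUN up by ID, validate the new
 * size against the blocksize, and let the file or device handler above
 * recompute size_bytes under the vnode lock.  ctl_lun_capacity_changed()
 * then notifies the mid-layer so initiators can learn of the new
 * capacity.  A plausible userland invocation (assuming the ctladm(8)
 * option names) would be:
 *
 *	ctladm modify -b block -l <lun_id> -s <size_in_bytes>
 */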
2088static int
2089ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2090{
2091	struct ctl_lun_modify_params *params;
2092	struct ctl_be_block_lun *be_lun;
2093	int vfs_is_locked, error;
2094
2095	params = &req->reqdata.modify;
2096
2097	mtx_lock(&softc->lock);
2098
2099	be_lun = NULL;
2100
2101	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
2102		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
2103			break;
2104	}
2105	mtx_unlock(&softc->lock);
2106
2107	if (be_lun == NULL) {
2108		snprintf(req->error_str, sizeof(req->error_str),
2109			 "%s: LUN %u is not managed by the block backend",
2110			 __func__, params->lun_id);
2111		goto bailout_error;
2112	}
2113
2114	if (params->lun_size_bytes != 0) {
2115		if (params->lun_size_bytes < be_lun->blocksize) {
2116			snprintf(req->error_str, sizeof(req->error_str),
2117				"%s: LUN size %ju < blocksize %u", __func__,
2118				(uintmax_t)params->lun_size_bytes, be_lun->blocksize);
2119			goto bailout_error;
2120		}
2121	}
2122
2123	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
2124	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
2125
2126	if (be_lun->vn->v_type == VREG)
2127		error = ctl_be_block_modify_file(be_lun, req);
2128	else
2129		error = ctl_be_block_modify_dev(be_lun, req);
2130
2131	VOP_UNLOCK(be_lun->vn, 0);
2132	VFS_UNLOCK_GIANT(vfs_is_locked);
2133
2134	if (error != 0)
2135		goto bailout_error;
2136
2137	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
2138
2139	/*
2140	 * The maximum LBA is the size - 1.
2141	 *
2142	 * XXX: Note that this field is being updated without locking,
2143	 * 	which might cause problems on 32-bit architectures.
2144	 */
2145	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
2146	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
2147
2148	/* Tell the user the exact size we ended up using */
2149	params->lun_size_bytes = be_lun->size_bytes;
2150
2151	req->status = CTL_LUN_OK;
2152
2153	return (0);
2154
2155bailout_error:
2156	req->status = CTL_LUN_ERROR;
2157
2158	return (0);
2159}
2160
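/*
 * Shutdown callback, invoked by the CTL mid-layer once the LUN has been
 * invalidated.  Mark the LUN unconfigured and wake any thread sleeping
 * in the create or remove paths above.
 */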
2161static void
2162ctl_be_block_lun_shutdown(void *be_lun)
2163{
2164	struct ctl_be_block_lun *lun;
2165	struct ctl_be_block_softc *softc;
2166
2167	lun = (struct ctl_be_block_lun *)be_lun;
2168
2169	softc = lun->softc;
2170
2171	mtx_lock(&softc->lock);
2172	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
2173	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2174		wakeup(lun);
2175	mtx_unlock(&softc->lock);
2176
2177}
2178
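/*
 * Configuration-status callback from the CTL mid-layer.  On success,
 * clear the unconfigured flag and attempt to enable the LUN,
 * invalidating it again if that fails.  On failure, latch the
 * config-error flag so that the waiter in ctl_be_block_create() can
 * report the problem.  The error path does an unconditional wakeup(),
 * which is harmless if nothing is sleeping on the channel.
 */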
2179static void
2180ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
2181{
2182	struct ctl_be_block_lun *lun;
2183	struct ctl_be_block_softc *softc;
2184
2185	lun = (struct ctl_be_block_lun *)be_lun;
2186	softc = lun->softc;
2187
2188	if (status == CTL_LUN_CONFIG_OK) {
2189		mtx_lock(&softc->lock);
2190		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2191		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2192			wakeup(lun);
2193		mtx_unlock(&softc->lock);
2194
2195		/*
2196		 * We successfully added the LUN; now attempt to enable it.
2197		 */
2198		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
2199			printf("%s: ctl_enable_lun() failed!\n", __func__);
2200			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
2201				printf("%s: ctl_invalidate_lun() failed!\n",
2202				       __func__);
2203			}
2204		}
2205
2206		return;
2207	}
2208
2209
2210	mtx_lock(&softc->lock);
2211	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2212	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
2213	wakeup(lun);
2214	mtx_unlock(&softc->lock);
2215}
2216
2217
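/*
 * Handle configuration (non-data) writes.  SYNCHRONIZE CACHE commands
 * are queued to the worker thread so that the flush can happen in a
 * sleepable context; START STOP UNIT is handled inline, since it only
 * toggles LUN state in the mid-layer.
 */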
2218static int
2219ctl_be_block_config_write(union ctl_io *io)
2220{
2221	struct ctl_be_block_lun *be_lun;
2222	struct ctl_be_lun *ctl_be_lun;
2223	int retval;
2224
2225	retval = 0;
2226
2227	DPRINTF("entered\n");
2228
2229	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
2230		CTL_PRIV_BACKEND_LUN].ptr;
2231	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
2232
2233	switch (io->scsiio.cdb[0]) {
2234	case SYNCHRONIZE_CACHE:
2235	case SYNCHRONIZE_CACHE_16:
2236		/*
2237		 * The upper level CTL code will filter out any CDBs with
2238		 * the immediate bit set and return the proper error.
2239		 *
2240		 * We don't really need to worry about what LBA range the
2241		 * user asked to be synced out.  When they issue a sync
2242		 * cache command, we'll sync out the whole thing.
2243		 */
2244		mtx_lock(&be_lun->lock);
2245		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
2246				   links);
2247		mtx_unlock(&be_lun->lock);
2248		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
2249		break;
2250	case START_STOP_UNIT: {
2251		struct scsi_start_stop_unit *cdb;
2252
2253		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
2254
2255		if (cdb->how & SSS_START)
2256			retval = ctl_start_lun(ctl_be_lun);
2257		else {
2258			retval = ctl_stop_lun(ctl_be_lun);
2259			/*
2260			 * XXX KDM Copan-specific offline behavior.
2261			 * Figure out a reasonable way to port this?
2262			 */
2263#ifdef NEEDTOPORT
2264			if ((retval == 0)
2265			 && (cdb->byte2 & SSS_ONOFFLINE))
2266				retval = ctl_lun_offline(ctl_be_lun);
2267#endif
2268		}
2269
2270		/*
2271		 * In general, the above routines should not fail.  They
2272		 * just set state for the LUN.  So we've got something
2273		 * pretty wrong here if we can't start or stop the LUN.
2274		 */
2275		if (retval != 0) {
2276			ctl_set_internal_failure(&io->scsiio,
2277						 /*sks_valid*/ 1,
2278						 /*retry_count*/ 0xf051);
2279			retval = CTL_RETVAL_COMPLETE;
2280		} else {
2281			ctl_set_success(&io->scsiio);
2282		}
2283		ctl_config_write_done(io);
2284		break;
2285	}
2286	default:
2287		ctl_set_invalid_opcode(&io->scsiio);
2288		ctl_config_write_done(io);
2289		retval = CTL_RETVAL_COMPLETE;
2290		break;
2291	}
2292
2293	return (retval);
2294
2295}
2296
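/*
 * No configuration reads are handled by this backend yet; this stub
 * exists to satisfy the backend interface.
 */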
2297static int
2298ctl_be_block_config_read(union ctl_io *io)
2299{
2300	return (0);
2301}
2302
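/*
 * Emit backend-specific LUN information as XML-style elements into the
 * caller's sbuf, for consumers such as ctladm(8) device lists.
 * sbuf_printf() returns non-zero on error (e.g. when a fixed-size
 * buffer fills up), hence the check after every append.
 */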
2303static int
2304ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
2305{
2306	struct ctl_be_block_lun *lun;
2307	int retval;
2308
2309	lun = (struct ctl_be_block_lun *)be_lun;
2310	retval = 0;
2311
2312	retval = sbuf_printf(sb, "<num_threads>");
2313
2314	if (retval != 0)
2315		goto bailout;
2316
2317	retval = sbuf_printf(sb, "%d", lun->num_threads);
2318
2319	if (retval != 0)
2320		goto bailout;
2321
2322	retval = sbuf_printf(sb, "</num_threads>");
2323
2324	/*
2325	 * For processor devices, we don't have a path variable.
2326	 */
2327	if ((retval != 0)
2328	 || (lun->dev_path == NULL))
2329		goto bailout;
2330
2331	retval = sbuf_printf(sb, "<file>");
2332
2333	if (retval != 0)
2334		goto bailout;
2335
2336	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
2337
2338	if (retval != 0)
2339		goto bailout;
2340
2341	retval = sbuf_printf(sb, "</file>\n");
2342
2343bailout:
2344
2345	return (retval);
2346}
2347
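/*
 * One-time backend initialization: set up the softc lock and queues and
 * pre-allocate a pool of 200 beio structures so that initial I/O does
 * not have to wait for allocations.
 */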
2348int
2349ctl_be_block_init(void)
2350{
2351	struct ctl_be_block_softc *softc;
2352	int retval;
2353
2354	softc = &backend_block_softc;
2355	retval = 0;
2356
2357	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
2358	STAILQ_INIT(&softc->beio_free_queue);
2359	STAILQ_INIT(&softc->disk_list);
2360	STAILQ_INIT(&softc->lun_list);
2361	ctl_grow_beio(softc, 200);
2362
2363	return (retval);
2364}
2365