/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/cam/ctl/ctl_backend_ramdisk.c 312834 2017-01-26 20:49:19Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_lun_create_params params;
	char lunname[32];
	uint64_t size_bytes;
	uint64_t size_blocks;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun cbe_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int rd_size;
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;
	int num_pages;
#else
	uint8_t *ramdisk_buffer;
#endif
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

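/*
 * Module init: set up the shared backing store used by every ramdisk LUN.
 * Depending on CTL_RAMDISK_PAGES this is either an array of individual
 * pages or a single rd_size-byte buffer; all I/O is mapped onto it.
 */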
int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	memset(softc, 0, sizeof(*softc));
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->lun_list);
	softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
	softc->num_pages = softc->rd_size / PAGE_SIZE;
	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
						  softc->num_pages, M_RAMDISK,
						  M_WAITOK);
	for (i = 0; i < softc->num_pages; i++)
		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK);
#else
	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
						  M_WAITOK);
#endif

	return (0);
}

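/*
 * Module shutdown: disable and invalidate any remaining LUNs, release the
 * shared backing store, and deregister the backend from CTL.
 */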
void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}

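/*
 * DMA completion callback.  Frees any scatter/gather list allocated for
 * the transfer, then either queues the I/O for another pass through
 * ctl_backend_ramdisk_continue() (if data remains) or completes it.
 */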
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	cbe_lun = CTL_BACKEND_LUN(io);
	be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
	getbinuptime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
	io->io_hdr.num_dmas++;
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		;
	} else if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	} else if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
	ctl_data_submit_done(io);
	return (0);
}

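/*
 * Backend entry point for READ/WRITE/VERIFY data submission.  VERIFY is
 * completed immediately; for everything else the remaining byte count is
 * stashed in the I/O's backend-private field and the transfer is started.
 */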
static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_lba_len_flags *lbalen;

	cbe_lun = CTL_BACKEND_LUN(io);
	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
	    lbalen->len * cbe_lun->blocksize;
	ctl_backend_ramdisk_continue(io);
	return (CTL_RETVAL_COMPLETE);
}

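/*
 * Map (part of) the request onto the shared ramdisk buffer and hand it to
 * ctl_datamove().  Because every I/O reuses the same backing pages, writes
 * are effectively discarded and reads return whatever the buffer currently
 * holds; that is the "fake" part of this backend.
 */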
static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int i;
#endif

	softc = &rd_softc;
	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
	sg_filled = min(btoc(len), softc->num_pages);
	if (sg_filled > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  sg_filled, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < sg_filled; i++) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
			len_filled += sg_entries[i].len;
		}
	} else {
		sg_filled = 0;
		len_filled = len;
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
	}
#else
	sg_filled = 0;
	len_filled = min(len, softc->rd_size);
	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_data_len = len_filled;
	io->scsiio.kern_sg_entries = sg_filled;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

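/*
 * Taskqueue worker: drains the per-LUN continuation queue, restarting each
 * partially completed I/O outside of the data-move completion path.
 */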
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;

	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			ctl_backend_ramdisk_continue(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

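/*
 * Handler for LUN requests arriving through the CTL character device,
 * typically issued by ctladm(8); for example, a command along the lines
 * of "ctladm create -b ramdisk -s 1G" should show up here as a
 * CTL_LUN_REQ ioctl with reqtype CTL_LUNREQ_CREATE.
 */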
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

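/*
 * Remove a LUN: disable and invalidate it, then sleep until the shutdown
 * callback reports that CTL is finished with it before freeing resources.
 */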
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		taskqueue_drain_all(be_lun->io_taskqueue);
		taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&be_lun->cbe_lun.options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

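/*
 * Create a LUN: allocate the per-LUN state, apply the user-supplied
 * options (blocksize, size, ha_role, serial number, device ID, etc.),
 * register the LUN with CTL via ctl_add_lun(), and wait for the
 * config_status callback before reporting the result back to userland.
 */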
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	char *value;
	char tmpstr[32];
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->be_lun = be_lun;
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	cbe_lun->flags = 0;
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->atomicblock = UINT32_MAX;
		cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = ctl_get_opt(&cbe_lun->options, "unmap");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = ctl_get_opt(&cbe_lun->options, "readonly");
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = ctl_get_opt(&cbe_lun->options, "serseq");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
			MIN(sizeof(cbe_lun->serial_num),
			    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
			MIN(sizeof(cbe_lun->device_id),
			    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
		  be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = cbe_lun->lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL) {
			taskqueue_free(be_lun->io_taskqueue);
		}
		ctl_free_opts(&cbe_lun->options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

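/*
 * Modify an existing LUN: re-apply the options, update the HA role
 * (primary vs. secondary) if it changed, and resize the LUN, notifying
 * CTL of the new capacity.
 */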
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;
	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN size %ju < blocksize %u", __func__,
			 be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

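/*
 * Called by CTL once an invalidated LUN is no longer in use.  Wakes up a
 * thread sleeping in the rm/create paths if one is waiting; otherwise the
 * LUN is freed here.
 */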
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;
	int do_free;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;
	do_free = 0;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		do_free = 1;
	}
	mtx_unlock(&softc->lock);

	if (do_free != 0)
		free(be_lun, M_RAMDISK);
}

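/*
 * Called by CTL to report the outcome of ctl_add_lun().  On success the
 * LUN is enabled; on failure the waiter (if any) is notified of the error,
 * otherwise the LUN is torn down here.
 */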
static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

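/*
 * Handle configuration (non-data) write commands.  Since nothing is ever
 * actually stored, cache syncs, PREVENT ALLOW, UNMAP and WRITE SAME are
 * acknowledged without doing any work; START STOP UNIT toggles the LUN's
 * power/media state.
 */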
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	int retval;

	cbe_lun = CTL_BACKEND_LUN(io);
	retval = 0;
	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

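/*
 * Handle configuration read commands.  The only request accepted here is
 * SERVICE ACTION IN with the SGLS (GET LBA STATUS) service action, for
 * which the default data prepared by CTL is returned unchanged; anything
 * else is rejected with an invalid field or invalid opcode error.
 */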
static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			/* We have nothing to tell, leave default data. */
			ctl_config_read_done(io);
			retval = CTL_RETVAL_COMPLETE;
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}