/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
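
/*
 * Example usage (illustrative only; see ctladm(8) for the authoritative
 * syntax and options):
 *
 *	ctladm create -b ramdisk -s 10485760
 *	ctladm remove -b ramdisk -l 0
 */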

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ctl/ctl_backend_ramdisk.c 240993 2012-09-27 10:51:38Z trasz $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;
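
/*
 * Flag usage, as implemented below: a LUN is created with UNCONFIGURED set
 * and the flag is cleared once ctl_backend_ramdisk_lun_config_status()
 * reports success; lun_shutdown() sets it again when CTL tears the LUN
 * down.  CONFIG_ERR records a failed configuration so that a waiting
 * creator can report the error.  WAITING is set by the create and remove
 * paths while they msleep() on the LUN; when it is set, the shutdown and
 * config_status callbacks wakeup() the sleeper instead of freeing the LUN
 * themselves.
 */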

struct ctl_be_ramdisk_lun {
	uint64_t size_bytes;
	uint64_t size_blocks;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun ctl_be_lun;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int rd_size;
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;
	int num_pages;
#else
	uint8_t *ramdisk_buffer;
#endif
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};
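
/*
 * Note that there is a single backing buffer of rd_size bytes (4 MB, set in
 * ctl_backend_ramdisk_init() below), kept either as one contiguous
 * allocation or as an array of pages when CTL_RAMDISK_PAGES is defined.
 * Every LUN maps its reads and writes onto this shared scratch memory, so
 * no data is actually retained per LUN; that is what makes this ramdisk
 * "fake" and useful mainly for exercising and benchmarking the target
 * stack.
 */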

static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));

	mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);

	STAILQ_INIT(&softc->lun_list);
	softc->rd_size = 4 * 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
	softc->num_pages = softc->rd_size / PAGE_SIZE;
	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
						  softc->num_pages, M_RAMDISK,
						  M_WAITOK);
	for (i = 0; i < softc->num_pages; i++)
		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK);
#else
	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
						  M_WAITOK);
#endif

	return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	softc = &rd_softc;

	mtx_lock(&softc->lock);
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
		/*
		 * Grab the next LUN.  The current LUN may get removed by
		 * ctl_invalidate_lun(), which will call our LUN shutdown
		 * routine, if there is no outstanding I/O for this LUN.
		 */
		next_lun = STAILQ_NEXT(lun, links);

		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->ctl_be_lun);
		ctl_invalidate_lun(&lun->ctl_be_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if ((io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		io->io_hdr.status = CTL_SUCCESS;
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	ctl_done(io);
	return (0);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len lbalen;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int len_filled;
	int i;
#endif
	int num_sg_entries, len;
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_lun *ctl_be_lun;
	struct ctl_be_ramdisk_lun *be_lun;

	softc = &rd_softc;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	len = lbalen.len * ctl_be_lun->blocksize;

	/*
	 * Kick out the request if it's bigger than we can handle.
	 */
	if (len > softc->rd_size) {
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_done(io);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Kick out the request if it's larger than the device size that
	 * the user requested.
	 */
	if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) {
		ctl_set_lba_out_of_range(&io->scsiio);
		ctl_done(io);
		return (CTL_RETVAL_COMPLETE);
	}

#ifdef CTL_RAMDISK_PAGES
	num_sg_entries = len >> PAGE_SHIFT;
	if ((len & (PAGE_SIZE - 1)) != 0)
		num_sg_entries++;

	if (num_sg_entries > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  num_sg_entries, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < num_sg_entries;
		     i++, len_filled += PAGE_SIZE) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = ctl_min(PAGE_SIZE,
						    len - len_filled);
		}
	} else {
#endif /* CTL_RAMDISK_PAGES */
		/*
		 * If this is less than 1 page, don't bother allocating a
		 * scatter/gather list for it.  This saves time/overhead.
		 */
		num_sg_entries = 0;
#ifdef CTL_RAMDISK_PAGES
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
#else
		io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif
#ifdef CTL_RAMDISK_PAGES
	}
#endif

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_total_len = len;
	io->scsiio.kern_rel_offset = 0;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_sg_entries = num_sg_entries;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
	getbintime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);

	return (CTL_RETVAL_COMPLETE);
}
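
/*
 * Worked example for the CTL_RAMDISK_PAGES case above, assuming a 4096-byte
 * PAGE_SIZE: a 10240-byte transfer gives num_sg_entries = (10240 >> 12) + 1
 * = 3, and the loop fills the entries with 4096, 4096 and 2048 bytes, each
 * pointing at one of the preallocated ramdisk pages.
 */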

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req,
							    /*do_wait*/ 1);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}
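
/*
 * The requests above are issued from userland (normally by ctladm(8))
 * through the CTL control device.  A minimal sketch of a create request,
 * assuming the /dev/cam/ctl node and the struct ctl_lun_req layout from
 * ctl_ioctl.h of this vintage:
 *
 *	struct ctl_lun_req req;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.backend, "ramdisk", sizeof(req.backend));
 *	req.reqtype = CTL_LUNREQ_CREATE;
 *	req.reqdata.create.lun_size_bytes = 10 * 1024 * 1024;
 *	ioctl(fd, CTL_LUN_REQ, &req);	(fd from open("/dev/cam/ctl", O_RDWR))
 */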

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	retval = 0;
	params = &req->reqdata.rm;

	be_lun = NULL;

	mtx_lock(&softc->lock);

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0)
		free(be_lun, M_RAMDISK);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:

	/*
	 * Don't leave the waiting flag set.  Note that we can branch here
	 * before a matching LUN has been found, so only touch the flags if
	 * we actually have a LUN.
	 */
	if (be_lun != NULL) {
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
	}

	req->status = CTL_LUN_ERROR;

	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req, int do_wait)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_create_params *params;
	uint32_t blocksize;
	char tmpstr[32];
	int retval;

	retval = 0;
	params = &req->reqdata.create;
	if (params->blocksize_bytes != 0)
		blocksize = params->blocksize_bytes;
	else
		blocksize = 512;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
			M_WAITOK : M_NOWAIT));

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating %zd bytes", __func__,
			 sizeof(*be_lun));
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

		if (params->lun_size_bytes < blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, blocksize);
			goto bailout_error;
		}

		be_lun->size_blocks = params->lun_size_bytes / blocksize;
		be_lun->size_bytes = be_lun->size_blocks * blocksize;

		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		be_lun->ctl_be_lun.maxlba = 0;
		blocksize = 0;
		be_lun->size_bytes = 0;
		be_lun->size_blocks = 0;
	}

	be_lun->ctl_be_lun.blocksize = blocksize;

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = blocksize;

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	be_lun->softc = softc;

	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	be_lun->ctl_be_lun.be_lun = be_lun;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_backend_ramdisk_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	if (do_wait == 0)
		return (retval);

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	free(be_lun, M_RAMDISK);

	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_modify_params *params;
	uint32_t blocksize;

	params = &req->reqdata.modify;

	be_lun = NULL;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN size \"auto\" not supported "
			 "by the ramdisk backend", __func__);
		goto bailout_error;
	}

	blocksize = be_lun->ctl_be_lun.blocksize;

	if (params->lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN size %ju < blocksize %u", __func__,
			 params->lun_size_bytes, blocksize);
		goto bailout_error;
	}

	be_lun->size_blocks = params->lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 * which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}
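
/*
 * Example resize via ctladm(8) (illustrative; option spelling may differ
 * between releases):
 *
 *	ctladm modify -b ramdisk -l 0 -s 20971520
 */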

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;
	int do_free;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;
	do_free = 0;

	mtx_lock(&softc->lock);

	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		do_free = 1;
	}

	mtx_unlock(&softc->lock);

	if (do_free != 0)
		free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;
		struct ctl_be_lun *ctl_be_lun;
		struct ctl_be_ramdisk_lun *be_lun;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
			CTL_PRIV_BACKEND_LUN].ptr;
		be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	/*
	 * XXX KDM need to implement!!
	 */
	return (0);
}