Lines matching refs:be_lun in the CTL ramdisk backend (ctl_backend_ramdisk)

214 ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
221 if (be_lun->cap_bytes == 0) {
224 return (be_lun->zero_page);
226 return ((uint8_t *)be_lun->pages);
234 sx_xlock(&be_lun->page_lock);
235 pp = &be_lun->pages;
236 for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
245 if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
247 *pp = malloc(be_lun->pblocksize, M_RAMDISK,
251 be_lun->cap_used += be_lun->pblocksize;
253 *pp = malloc(be_lun->pblocksize, M_RAMDISK,
256 sx_xunlock(&be_lun->page_lock);
259 sx_slock(&be_lun->page_lock);
260 p = be_lun->pages;
261 for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
268 sx_sunlock(&be_lun->page_lock);
270 return (be_lun->zero_page);
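
The getpage fragments (214-270) walk a multi-level page-pointer table: be_lun->pages is the root, indir counts the levels, each level consumes PPPS bits of the backing-page number, and an unmapped slot resolves to zero_page on reads. A minimal userspace sketch of that walk, with PPPS and the sentinel value assumed for illustration rather than taken from the real header:

/*
 * Userspace model of the multi-level page lookup suggested by the
 * getpage fragment above.  PPPS (pointer-index bits per level) and the
 * P_UNMAPPED sentinel are assumptions, not the kernel's definitions.
 */
#include <sys/types.h>
#include <stdint.h>

#define PPPS		9		/* assumed: 512 pointers per indirect page */
#define PPP		(1u << PPPS)
#define P_UNMAPPED	((void *)0)	/* assumed sentinel: page not allocated */

struct model_lun {
	void	*pages;		/* root: a data page or a pointer page */
	int	 indir;		/* number of indirection levels */
	uint8_t	*zero_page;	/* returned for holes on read */
};

/* Walk indir levels, taking PPPS bits of the page number per level. */
static uint8_t *
model_getpage_for_read(struct model_lun *lun, off_t pn)
{
	void *p = lun->pages;
	int s;

	for (s = (lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (p == P_UNMAPPED)
			return (lun->zero_page);	/* hole reads as zeroes */
		p = ((void **)p)[(pn >> s) & (PPP - 1)];
	}
	if (p == P_UNMAPPED)
		return (lun->zero_page);
	return ((uint8_t *)p);
}
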
276 ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
282 if (be_lun->cap_bytes == 0)
284 sx_xlock(&be_lun->page_lock);
285 pp = &be_lun->pages;
286 for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
294 be_lun->cap_used -= be_lun->pblocksize;
298 be_lun->cap_used -= be_lun->pblocksize;
302 sx_xunlock(&be_lun->page_lock);
306 ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
312 if (be_lun->cap_bytes == 0)
314 sx_xlock(&be_lun->page_lock);
315 pp = &be_lun->pages;
316 for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
323 if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
324 be_lun->cap_used += be_lun->pblocksize;
331 sx_xunlock(&be_lun->page_lock);
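
The unmappage (276-302) and anchorpage (306-331) fragments show the thin-provisioning bookkeeping: cap_bytes is the configured backing limit (zero means fully preallocated), and cap_used moves by pblocksize whenever a backing page is released or reserved. A hedged sketch of that accounting, with the sentinel values and field layout assumed:

/*
 * Sketch of the capacity accounting implied by the unmappage/anchorpage
 * fragments.  P_UNMAPPED / P_ANCHORED sentinels and the structure are
 * assumptions; only the cap_used arithmetic mirrors the fragments.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define P_UNMAPPED	((void *)0)
#define P_ANCHORED	((void *)1)	/* assumed: reserved but not written */

struct model_cap {
	uint64_t cap_bytes;	/* 0 => not thin-provisioned */
	uint64_t cap_used;
	uint32_t pblocksize;
};

/* UNMAP: release a backing page and give its bytes back to the pool. */
static void
model_unmap_slot(struct model_cap *c, void **slot)
{
	if (c->cap_bytes == 0)
		return;			/* fully preallocated: nothing to do */
	if (*slot != P_UNMAPPED && *slot != P_ANCHORED) {
		free(*slot);
		c->cap_used -= c->pblocksize;
	}
	*slot = P_UNMAPPED;
}

/* ANCHOR: reserve capacity for a page without allocating data yet. */
static bool
model_anchor_slot(struct model_cap *c, void **slot)
{
	if (c->cap_bytes == 0)
		return (true);
	if (*slot == P_UNMAPPED) {
		if (c->cap_used >= c->cap_bytes)
			return (false);	/* out of backing capacity */
		c->cap_used += c->pblocksize;
		*slot = P_ANCHORED;
	}
	return (true);
}
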
369 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
379 page = ctl_backend_ramdisk_getpage(be_lun,
406 struct ctl_be_ramdisk_lun *be_lun =
421 mtx_lock(&be_lun->queue_lock);
422 STAILQ_INSERT_TAIL(&be_lun->cont_queue,
424 mtx_unlock(&be_lun->queue_lock);
425 taskqueue_enqueue(be_lun->io_taskqueue,
426 &be_lun->io_task);
459 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
470 sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
479 page = ctl_backend_ramdisk_getpage(be_lun,
489 sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
494 page = ctl_backend_ramdisk_getpage(be_lun,
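
The cont fragments (459-494) split a request into scatter/gather entries, one per backing page touched: sgs counts the pages spanned, and each entry carries at most pblocksize - off bytes starting at the in-page offset. A userspace sketch of that split, with all structure and function names assumed:

/*
 * Sketch of the per-page scatter/gather split suggested by fragments
 * 470-494.  Names are assumptions; only the pblocksize/offset
 * arithmetic mirrors the listing.
 */
#include <stddef.h>
#include <stdint.h>

struct model_sg {
	void	*addr;
	size_t	 len;
};

/*
 * Split 'len' bytes, starting 'off' bytes into the first backing page,
 * into at most 'max' SG entries; getpage() returns a page's address.
 */
static int
model_fill_sg(struct model_sg *sg, int max, uint64_t first_page,
    size_t off, size_t len, uint32_t pblocksize,
    uint8_t *(*getpage)(uint64_t))
{
	int i;

	for (i = 0; len > 0 && i < max; i++) {
		size_t chunk = len < pblocksize - off ? len : pblocksize - off;

		sg[i].addr = getpage(first_page + i) + off;
		sg[i].len = chunk;
		len -= chunk;
		off = 0;		/* only the first page has an offset */
	}
	return (i);
}
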
536 struct ctl_be_ramdisk_lun *be_lun;
539 be_lun = (struct ctl_be_ramdisk_lun *)context;
540 mtx_lock(&be_lun->queue_lock);
542 io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
544 STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links);
545 mtx_unlock(&be_lun->queue_lock);
550 mtx_lock(&be_lun->queue_lock);
560 mtx_unlock(&be_lun->queue_lock);
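
Fragments 421-426 and 536-560 form a producer/consumer pair: the completion path appends the I/O to cont_queue under queue_lock and enqueues io_task on io_taskqueue, and the worker pops one entry at a time, dropping the lock while it processes each one. A userspace model of that hand-off, with pthread primitives standing in for the kernel mutex and taskqueue:

/*
 * Userspace model of the cont_queue/worker hand-off above.  A condition
 * variable stands in for taskqueue_enqueue(); the tail-queue is rolled
 * by hand to keep the sketch self-contained.
 */
#include <pthread.h>
#include <stddef.h>

struct model_io {
	struct model_io	*next;
	/* ... payload ... */
};

struct model_queue {
	pthread_mutex_t	 lock;
	pthread_cond_t	 kick;		/* stands in for the taskqueue kick */
	struct model_io	*head;
	struct model_io	**tailp;	/* must start as &head */
};

static void
model_enqueue(struct model_queue *q, struct model_io *io)
{
	pthread_mutex_lock(&q->lock);
	io->next = NULL;
	*q->tailp = io;			/* STAILQ_INSERT_TAIL equivalent */
	q->tailp = &io->next;
	pthread_mutex_unlock(&q->lock);
	pthread_cond_signal(&q->kick);	/* wake the worker */
}

static void
model_worker(struct model_queue *q, void (*process)(struct model_io *))
{
	struct model_io *io;

	pthread_mutex_lock(&q->lock);
	for (;;) {
		while ((io = q->head) == NULL)
			pthread_cond_wait(&q->kick, &q->lock);
		if ((q->head = io->next) == NULL)	/* STAILQ_REMOVE_HEAD */
			q->tailp = &q->head;
		pthread_mutex_unlock(&q->lock);
		process(io);			/* do the copy/compare unlocked */
		pthread_mutex_lock(&q->lock);
	}
}
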
567 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
575 scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
576 page = ctl_backend_ramdisk_getpage(be_lun,
621 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
631 page = ctl_backend_ramdisk_getpage(be_lun, p, op);
634 min(len, be_lun->pblockmul - lbaoff) *
644 page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
652 ctl_backend_ramdisk_anchorpage(be_lun, p);
655 ctl_backend_ramdisk_unmappage(be_lun, p);
663 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
688 page = ctl_backend_ramdisk_getpage(be_lun,
799 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
803 if (be_lun->cap_bytes == 0)
805 sx_slock(&be_lun->page_lock);
807 val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
809 val = (be_lun->cap_bytes - be_lun->cap_used) /
810 be_lun->cbe_lun.blocksize;
812 sx_sunlock(&be_lun->page_lock);
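
The attribute fragments (799-812) report thin-provisioning counters as plain divisions under the shared page_lock: blocksused is cap_used / blocksize and blocksavail is (cap_bytes - cap_used) / blocksize. A small worked example with assumed numbers:

/* Sketch of the attribute math in fragments 799-812 (numbers assumed). */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t cap_bytes = 1ULL << 30;	/* 1 GiB backing limit (assumed) */
	uint64_t cap_used  = 256ULL << 20;	/* 256 MiB currently allocated */
	uint32_t blocksize = 512;

	printf("blocksused  = %llu\n",
	    (unsigned long long)(cap_used / blocksize));
	printf("blocksavail = %llu\n",
	    (unsigned long long)((cap_bytes - cap_used) / blocksize));
	return (0);
}
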
858 struct ctl_be_ramdisk_lun *be_lun;
865 SLIST_FOREACH(be_lun, &softc->lun_list, links) {
866 if (be_lun->cbe_lun.lun_id == params->lun_id) {
867 SLIST_REMOVE(&softc->lun_list, be_lun,
875 if (be_lun == NULL) {
891 be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
894 retval = ctl_remove_lun(&be_lun->cbe_lun);
900 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
906 while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
907 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0);
911 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
912 if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
914 free(be_lun, M_RAMDISK);
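
The rm fragments (858-914) and the shutdown fragments (1233-1235) cooperate through a flag handshake: the remover sets CTL_BE_RAMDISK_LUN_WAITING, asks the mid-layer to drop the LUN, then msleep()s until the shutdown path sets CTL_BE_RAMDISK_LUN_UNCONFIGURED and wakeup()s it; only then is the structure freed. A userspace model of that handshake, with a condition variable standing in for msleep()/wakeup() and the no-waiter case omitted:

/*
 * Userspace model of the WAITING/UNCONFIGURED handshake between the rm
 * path (858-914) and the shutdown path (1233-1235).  Flag names follow
 * the fragments; everything else is assumed.
 */
#include <pthread.h>

#define LUN_WAITING		0x01
#define LUN_UNCONFIGURED	0x02

struct model_lun {
	pthread_mutex_t	 lock;
	pthread_cond_t	 cv;
	int		 flags;
};

/* rm path: wait until shutdown has run before freeing the LUN. */
static void
model_rm_wait(struct model_lun *lun)
{
	pthread_mutex_lock(&lun->lock);
	lun->flags |= LUN_WAITING;
	/* ... ask the mid-layer to remove the LUN here ... */
	while ((lun->flags & LUN_UNCONFIGURED) == 0)
		pthread_cond_wait(&lun->cv, &lun->lock);
	lun->flags &= ~LUN_WAITING;
	pthread_mutex_unlock(&lun->lock);
	/* safe to free the LUN structure now */
}

/* shutdown path: mark unconfigured and wake any waiter. */
static void
model_lun_shutdown(struct model_lun *lun)
{
	pthread_mutex_lock(&lun->lock);
	lun->flags |= LUN_UNCONFIGURED;
	if (lun->flags & LUN_WAITING)
		pthread_cond_signal(&lun->cv);
	pthread_mutex_unlock(&lun->lock);
}
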
932 struct ctl_be_ramdisk_lun *be_lun;
943 be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
944 cbe_lun = &be_lun->cbe_lun;
946 be_lun->params = req->reqdata.create;
947 be_lun->softc = softc;
953 be_lun->flags = 0;
962 be_lun->pblocksize = PAGE_SIZE;
966 be_lun->pblocksize = t;
968 if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
971 be_lun->pblocksize);
983 be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
984 if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
988 be_lun->pblocksize, cbe_lun->blocksize);
997 be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
998 be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
999 be_lun->indir = 0;
1000 t = be_lun->size_bytes / be_lun->pblocksize;
1003 be_lun->indir++;
1005 cbe_lun->maxlba = be_lun->size_blocks - 1;
1006 cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
1010 cbe_lun->atomicblock = be_lun->pblocksize;
1011 cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
1014 ctl_expand_number(value, &be_lun->cap_bytes);
1016 be_lun->pblockmul = 1;
1022 params->lun_size_bytes = be_lun->size_bytes;
1078 STAILQ_INIT(&be_lun->cont_queue);
1079 sx_init(&be_lun->page_lock, "ctlram page");
1080 if (be_lun->cap_bytes == 0) {
1081 be_lun->indir = 0;
1082 be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
1084 be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
1086 mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF);
1087 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
1088 be_lun);
1090 be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK,
1091 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
1092 if (be_lun->io_taskqueue == NULL) {
1098 retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
1106 retval = ctl_add_lun(&be_lun->cbe_lun);
1117 SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
1127 if (be_lun != NULL) {
1128 if (be_lun->io_taskqueue != NULL)
1129 taskqueue_free(be_lun->io_taskqueue);
1131 free(be_lun->zero_page, M_RAMDISK);
1132 ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
1133 sx_destroy(&be_lun->page_lock);
1134 mtx_destroy(&be_lun->queue_lock);
1135 free(be_lun, M_RAMDISK);
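
The create fragments (943-1135) derive the LUN geometry: pblockmul = pblocksize / blocksize must be a power of two, size_blocks = lun_size_bytes / blocksize yields maxlba, pblockexp is fls(pblockmul) - 1, and indir is bumped until a page-pointer tree of that depth covers size_bytes / pblocksize backing pages. A sketch of that sizing math under assumed constants:

/*
 * Sketch of the geometry computed in fragments 983-1006.  The
 * 512..131072 pblocksize bounds and the power-of-two check follow the
 * fragments; PPPS and the sample sizes are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define PPPS	9			/* assumed pointers-per-page shift */
#define PPP	(1u << PPPS)

int
main(void)
{
	uint64_t lun_size_bytes = 8ULL << 30;	/* 8 GiB LUN (assumed) */
	uint32_t blocksize = 512;
	uint32_t pblocksize = 65536;		/* backing page size */
	uint32_t pblockmul = pblocksize / blocksize;
	uint64_t size_blocks, t;
	int indir = 0, pblockexp;

	if (pblocksize < 512 || pblocksize > 131072 ||
	    pblockmul < 1 || (pblockmul & (pblockmul - 1)) != 0)
		return (1);		/* same sanity checks as 968/984 */

	size_blocks = lun_size_bytes / blocksize;
	pblockexp = 31 - __builtin_clz(pblockmul);	/* fls(pblockmul) - 1 */

	/* Levels of indirection needed to address all backing pages. */
	for (t = lun_size_bytes / pblocksize; t > 1; t /= PPP)
		indir++;

	printf("maxlba=%llu pblockexp=%d indir=%d\n",
	    (unsigned long long)(size_blocks - 1), pblockexp, indir);
	return (0);
}
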
1144 struct ctl_be_ramdisk_lun *be_lun;
1154 SLIST_FOREACH(be_lun, &softc->lun_list, links) {
1155 if (be_lun->cbe_lun.lun_id == params->lun_id)
1159 if (be_lun == NULL) {
1165 cbe_lun = &be_lun->cbe_lun;
1168 be_lun->params.lun_size_bytes = params->lun_size_bytes;
1193 blocksize = be_lun->cbe_lun.blocksize;
1194 if (be_lun->params.lun_size_bytes < blocksize) {
1197 be_lun->params.lun_size_bytes, blocksize);
1200 be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
1201 be_lun->size_bytes = be_lun->size_blocks * blocksize;
1202 be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
1203 ctl_lun_capacity_changed(&be_lun->cbe_lun);
1206 params->lun_size_bytes = be_lun->size_bytes;
1221 struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
1222 struct ctl_be_ramdisk_softc *softc = be_lun->softc;
1224 taskqueue_drain_all(be_lun->io_taskqueue);
1225 taskqueue_free(be_lun->io_taskqueue);
1226 nvlist_destroy(be_lun->cbe_lun.options);
1227 free(be_lun->zero_page, M_RAMDISK);
1228 ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
1229 sx_destroy(&be_lun->page_lock);
1230 mtx_destroy(&be_lun->queue_lock);
1233 be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
1234 if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
1235 wakeup(be_lun);
1237 free(be_lun, M_RAMDISK);