Lines Matching defs:vdc

27  * LDoms virtual disk client (vdc) device driver
39 * Setup the communications link over the LDC channel that vdc uses to
44 * The upper layers call into vdc via strategy(9E) and DKIO(7I)
45 * ioctl calls. vdc will copy the data to be written to the descriptor
51 * The vDisk server will ACK some or all of the messages vdc sends to it
53 * vdc will check the descriptor ring and signal to the upper layer
99 #include <sys/vdc.h>
129 static int vdc_send(vdc_t *vdc, caddr_t pkt, size_t *msglen);
130 static int vdc_do_ldc_init(vdc_t *vdc, vdc_server_t *srvr);
131 static int vdc_start_ldc_connection(vdc_t *vdc);
132 static int vdc_create_device_nodes(vdc_t *vdc);
133 static int vdc_create_device_nodes_efi(vdc_t *vdc);
134 static int vdc_create_device_nodes_vtoc(vdc_t *vdc);
135 static void vdc_create_io_kstats(vdc_t *vdc);
136 static void vdc_create_err_kstats(vdc_t *vdc);
137 static void vdc_set_err_kstats(vdc_t *vdc);
140 static int vdc_init_ports(vdc_t *vdc, md_t *mdp, mde_cookie_t vd_nodep);
141 static void vdc_fini_ports(vdc_t *vdc);
143 static int vdc_do_ldc_up(vdc_t *vdc);
144 static void vdc_terminate_ldc(vdc_t *vdc, vdc_server_t *srvr);
145 static int vdc_init_descriptor_ring(vdc_t *vdc);
146 static void vdc_destroy_descriptor_ring(vdc_t *vdc);
147 static int vdc_setup_devid(vdc_t *vdc);
151 static void vdc_store_label_unk(vdc_t *vdc);
152 static boolean_t vdc_is_opened(vdc_t *vdc);
153 static void vdc_update_size(vdc_t *vdc, size_t, size_t, size_t);
154 static int vdc_update_vio_bsize(vdc_t *vdc, uint32_t);
157 static int vdc_init_ver_negotiation(vdc_t *vdc, vio_ver_t ver);
159 static int vdc_init_attr_negotiation(vdc_t *vdc);
161 static int vdc_init_dring_negotiate(vdc_t *vdc);
168 static void vdc_process_msg_thread(vdc_t *vdc);
169 static int vdc_recv(vdc_t *vdc, vio_msg_t *msgp, size_t *nbytesp);
172 static int vdc_process_data_msg(vdc_t *vdc, vio_msg_t *msg);
173 static int vdc_handle_ver_msg(vdc_t *vdc, vio_ver_msg_t *ver_msg);
174 static int vdc_handle_attr_msg(vdc_t *vdc, vd_attr_msg_t *attr_msg);
175 static int vdc_handle_dring_reg_msg(vdc_t *vdc, vio_dring_reg_msg_t *msg);
186 static int vdc_do_op(vdc_t *vdc, int op, caddr_t addr, size_t nbytes,
192 static int vdc_depopulate_descriptor(vdc_t *vdc, uint_t idx);
194 static int vdc_verify_seq_num(vdc_t *vdc, vio_dring_msg_t *dring_msg);
200 static void vdc_create_fake_geometry(vdc_t *vdc);
201 static int vdc_validate_geometry(vdc_t *vdc);
202 static void vdc_validate(vdc_t *vdc);
204 static int vdc_null_copy_func(vdc_t *vdc, void *from, void *to,
206 static int vdc_get_wce_convert(vdc_t *vdc, void *from, void *to,
208 static int vdc_set_wce_convert(vdc_t *vdc, void *from, void *to,
210 static int vdc_get_vtoc_convert(vdc_t *vdc, void *from, void *to,
212 static int vdc_set_vtoc_convert(vdc_t *vdc, void *from, void *to,
214 static int vdc_get_extvtoc_convert(vdc_t *vdc, void *from, void *to,
216 static int vdc_set_extvtoc_convert(vdc_t *vdc, void *from, void *to,
218 static int vdc_get_geom_convert(vdc_t *vdc, void *from, void *to,
220 static int vdc_set_geom_convert(vdc_t *vdc, void *from, void *to,
222 static int vdc_get_efi_convert(vdc_t *vdc, void *from, void *to,
224 static int vdc_set_efi_convert(vdc_t *vdc, void *from, void *to,
227 static void vdc_ownership_update(vdc_t *vdc, int ownership_flags);
228 static int vdc_access_set(vdc_t *vdc, uint64_t flags);
229 static vdc_io_t *vdc_eio_queue(vdc_t *vdc, int index);
230 static void vdc_eio_unqueue(vdc_t *vdc, clock_t deadline,
232 static int vdc_eio_check(vdc_t *vdc, int flags);
242 * server if vdc receives a LDC reset event during the initiation of the
243 * handshake. This can happen if vdc reset the LDC channel and then immediately
256 * attach (i.e. the vdc lifecycle is VDC_ONLINE_PENDING) then the handshake
268 * 50 seconds x max(number of servers, vdc->hattr_min)
283 * Tunable variables to control how long vdc waits before timing out on
305 /* Count of the number of vdc instances attached */
319 * to the vdc instance the vdc_msglevel applies.
420 vdc_t *vdc = NULL;
424 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
428 *resultp = vdc->dip;
446 vdc_t *vdc = NULL;
463 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
468 if (vdc_is_opened(vdc)) {
469 DMSG(vdc, 0, "[%d] Cannot detach: device is open", instance);
473 if (vdc->dkio_flush_pending) {
474 DMSG(vdc, 0,
476 instance, vdc->dkio_flush_pending);
480 if (vdc->validate_pending) {
481 DMSG(vdc, 0,
483 instance, vdc->validate_pending);
487 DMSG(vdc, 0, "[%d] proceeding...\n", instance);
490 mutex_enter(&vdc->ownership_lock);
491 if (vdc->ownership & VDC_OWNERSHIP_GRANTED) {
492 rv = vdc_access_set(vdc, VD_ACCESS_SET_CLEAR);
494 vdc_ownership_update(vdc, VDC_OWNERSHIP_NONE);
497 mutex_exit(&vdc->ownership_lock);
500 mutex_enter(&vdc->lock);
501 vdc->lifecycle = VDC_LC_DETACHING;
502 mutex_exit(&vdc->lock);
508 for (srvr = vdc->server_list; srvr != NULL; srvr = srvr->next) {
510 DMSG(vdc, 0, "callback disabled (ldc=%lu, rv=%d)\n",
514 if (vdc->initialized & VDC_THREAD) {
515 mutex_enter(&vdc->read_lock);
516 if ((vdc->read_state == VDC_READ_WAITING) ||
517 (vdc->read_state == VDC_READ_RESET)) {
518 vdc->read_state = VDC_READ_RESET;
519 cv_signal(&vdc->read_cv);
522 mutex_exit(&vdc->read_lock);
525 mutex_enter(&vdc->lock);
526 if (vdc->state == VDC_STATE_INIT_WAITING) {
527 DMSG(vdc, 0,
530 vdc->state = VDC_STATE_RESETTING;
531 cv_signal(&vdc->initwait_cv);
532 } else if (vdc->state == VDC_STATE_FAILED) {
533 vdc->io_pending = B_TRUE;
534 cv_signal(&vdc->io_pending_cv);
536 mutex_exit(&vdc->lock);
539 thread_join(vdc->msg_proc_thr->t_did);
540 ASSERT(vdc->state == VDC_STATE_DETACH);
541 DMSG(vdc, 0, "[%d] Reset thread exit and join ..\n",
542 vdc->instance);
545 mutex_enter(&vdc->lock);
547 if (vdc->initialized & VDC_DRING)
548 vdc_destroy_descriptor_ring(vdc);
550 vdc_fini_ports(vdc);
552 if (vdc->eio_thread) {
553 eio_tid = vdc->eio_thread->t_did;
554 vdc->failfast_interval = 0;
555 ASSERT(vdc->num_servers == 0);
556 cv_signal(&vdc->eio_cv);
561 if (vdc->ownership & VDC_OWNERSHIP_WANTED) {
562 ownership_tid = vdc->ownership_thread->t_did;
563 vdc->ownership = VDC_OWNERSHIP_NONE;
564 cv_signal(&vdc->ownership_cv);
569 mutex_exit(&vdc->lock);
577 if (vdc->initialized & VDC_MINOR)
580 if (vdc->io_stats) {
581 kstat_delete(vdc->io_stats);
582 vdc->io_stats = NULL;
585 if (vdc->err_stats) {
586 kstat_delete(vdc->err_stats);
587 vdc->err_stats = NULL;
590 if (vdc->initialized & VDC_LOCKS) {
591 mutex_destroy(&vdc->lock);
592 mutex_destroy(&vdc->read_lock);
593 mutex_destroy(&vdc->ownership_lock);
594 cv_destroy(&vdc->initwait_cv);
595 cv_destroy(&vdc->dring_free_cv);
596 cv_destroy(&vdc->membind_cv);
597 cv_destroy(&vdc->sync_blocked_cv);
598 cv_destroy(&vdc->read_cv);
599 cv_destroy(&vdc->running_cv);
600 cv_destroy(&vdc->io_pending_cv);
601 cv_destroy(&vdc->ownership_cv);
602 cv_destroy(&vdc->eio_cv);
605 if (vdc->minfo)
606 kmem_free(vdc->minfo, sizeof (struct dk_minfo));
608 if (vdc->cinfo)
609 kmem_free(vdc->cinfo, sizeof (struct dk_cinfo));
611 if (vdc->vtoc)
612 kmem_free(vdc->vtoc, sizeof (struct extvtoc));
614 if (vdc->geom)
615 kmem_free(vdc->geom, sizeof (struct dk_geom));
617 if (vdc->devid) {
619 ddi_devid_free(vdc->devid);
622 if (vdc->initialized & VDC_SOFT_STATE)
625 DMSG(vdc, 0, "[%d] End %p\n", instance, (void *)vdc);
635 vdc_t *vdc = NULL;
649 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
658 vdc->initialized = VDC_SOFT_STATE;
663 vdc->dip = dip;
664 vdc->instance = instance;
665 vdc->vdisk_type = VD_DISK_TYPE_UNK;
666 vdc->vdisk_label = VD_DISK_LABEL_UNK;
667 vdc->state = VDC_STATE_INIT;
668 vdc->lifecycle = VDC_LC_ATTACHING;
669 vdc->session_id = 0;
670 vdc->vdisk_bsize = DEV_BSIZE;
671 vdc->vio_bmask = 0;
672 vdc->vio_bshift = 0;
673 vdc->max_xfer_sz = maxphys / vdc->vdisk_bsize;
678 * in vdc for supported operations early in the handshake process).
683 vdc->operations = VD_OP_MASK_READ;
685 vdc->vtoc = NULL;
686 vdc->geom = NULL;
687 vdc->cinfo = NULL;
688 vdc->minfo = NULL;
690 mutex_init(&vdc->lock, NULL, MUTEX_DRIVER, NULL);
691 cv_init(&vdc->initwait_cv, NULL, CV_DRIVER, NULL);
692 cv_init(&vdc->dring_free_cv, NULL, CV_DRIVER, NULL);
693 cv_init(&vdc->membind_cv, NULL, CV_DRIVER, NULL);
694 cv_init(&vdc->running_cv, NULL, CV_DRIVER, NULL);
695 cv_init(&vdc->io_pending_cv, NULL, CV_DRIVER, NULL);
697 vdc->io_pending = B_FALSE;
698 vdc->threads_pending = 0;
699 vdc->sync_op_blocked = B_FALSE;
700 cv_init(&vdc->sync_blocked_cv, NULL, CV_DRIVER, NULL);
702 mutex_init(&vdc->ownership_lock, NULL, MUTEX_DRIVER, NULL);
703 cv_init(&vdc->ownership_cv, NULL, CV_DRIVER, NULL);
704 cv_init(&vdc->eio_cv, NULL, CV_DRIVER, NULL);
707 mutex_init(&vdc->read_lock, NULL, MUTEX_DRIVER, NULL);
708 cv_init(&vdc->read_cv, NULL, CV_DRIVER, NULL);
709 vdc->read_state = VDC_READ_IDLE;
711 vdc->initialized |= VDC_LOCKS;
720 if (vdc_init_ports(vdc, mdp, vd_node) != 0) {
728 vdc_create_io_kstats(vdc);
729 vdc_create_err_kstats(vdc);
732 vdc->vdisk_label = VD_DISK_LABEL_UNK;
733 vdc->vtoc = kmem_zalloc(sizeof (struct extvtoc), KM_SLEEP);
734 vdc->geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP);
735 vdc->minfo = kmem_zalloc(sizeof (struct dk_minfo), KM_SLEEP);
738 vdc->msg_proc_thr = thread_create(NULL, 0, vdc_process_msg_thread,
739 vdc, 0, &p0, TS_RUN, minclsyspri);
740 if (vdc->msg_proc_thr == NULL) {
749 if (vdc->num_servers > 1) {
750 vdc->eio_thread = thread_create(NULL, 0, vdc_eio_thread, vdc, 0,
752 if (vdc->eio_thread == NULL) {
759 vdc->initialized |= VDC_THREAD;
770 mutex_enter(&vdc->lock);
771 (void) vdc_validate_geometry(vdc);
772 mutex_exit(&vdc->lock);
777 status = vdc_create_device_nodes(vdc);
779 DMSG(vdc, 0, "[%d] Failed to create device nodes",
788 vdc_set_err_kstats(vdc);
790 ASSERT(vdc->lifecycle == VDC_LC_ONLINE ||
791 vdc->lifecycle == VDC_LC_ONLINE_PENDING);
792 DMSG(vdc, 0, "[%d] Attach tasks successful\n", instance);
795 DMSG(vdc, 0, "[%d] Attach completed\n", instance);
818 vdc_do_ldc_init(vdc_t *vdc, vdc_server_t *srvr)
824 ASSERT(vdc != NULL);
828 ldc_attr.instance = vdc->instance;
836 DMSG(vdc, 0, "[%d] ldc_init(chan %ld) returned %d",
837 vdc->instance, srvr->ldc_id, status);
844 DMSG(vdc, 0, "[%d] Cannot discover LDC status [err=%d]",
845 vdc->instance, status);
854 DMSG(vdc, 0, "[%d] LDC callback reg. failed (%d)",
855 vdc->instance, status);
868 DMSG(vdc, 0, "[%d] ldc_open(chan %ld) returned %d",
869 vdc->instance, srvr->ldc_id, status);
877 vdc_terminate_ldc(vdc, srvr);
884 vdc_start_ldc_connection(vdc_t *vdc)
888 ASSERT(vdc != NULL);
890 ASSERT(MUTEX_HELD(&vdc->lock));
892 status = vdc_do_ldc_up(vdc);
894 DMSG(vdc, 0, "[%d] Finished bringing up LDC\n", vdc->instance);
921 vdc_create_io_kstats(vdc_t *vdc)
923 if (vdc->io_stats != NULL) {
924 DMSG(vdc, 0, "[%d] I/O kstat already exists\n", vdc->instance);
928 vdc->io_stats = kstat_create(VDC_DRIVER_NAME, vdc->instance, NULL,
930 if (vdc->io_stats != NULL) {
931 vdc->io_stats->ks_lock = &vdc->lock;
932 kstat_install(vdc->io_stats);
935 " will not be gathered", vdc->instance);
940 vdc_create_err_kstats(vdc_t *vdc)
946 int instance = vdc->instance;
948 if (vdc->err_stats != NULL) {
949 DMSG(vdc, 0, "[%d] ERR kstat already exists\n", vdc->instance);
958 vdc->err_stats = kstat_create(kstatmodule_err, instance, kstatname,
961 if (vdc->err_stats == NULL) {
967 stp = (vd_err_stats_t *)vdc->err_stats->ks_data;
981 vdc->err_stats->ks_update = nulldev;
983 kstat_install(vdc->err_stats);
987 vdc_set_err_kstats(vdc_t *vdc)
991 if (vdc->err_stats == NULL)
994 mutex_enter(&vdc->lock);
996 stp = (vd_err_stats_t *)vdc->err_stats->ks_data;
999 stp->vd_capacity.value.ui64 = vdc->vdisk_size * vdc->vdisk_bsize;
1003 mutex_exit(&vdc->lock);
1007 vdc_create_device_nodes_efi(vdc_t *vdc)
1009 ddi_remove_minor_node(vdc->dip, "h");
1010 ddi_remove_minor_node(vdc->dip, "h,raw");
1012 if (ddi_create_minor_node(vdc->dip, "wd", S_IFBLK,
1013 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE),
1016 vdc->instance);
1021 vdc->initialized |= VDC_MINOR;
1023 if (ddi_create_minor_node(vdc->dip, "wd,raw", S_IFCHR,
1024 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE),
1027 vdc->instance);
1035 vdc_create_device_nodes_vtoc(vdc_t *vdc)
1037 ddi_remove_minor_node(vdc->dip, "wd");
1038 ddi_remove_minor_node(vdc->dip, "wd,raw");
1040 if (ddi_create_minor_node(vdc->dip, "h", S_IFBLK,
1041 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE),
1044 vdc->instance);
1049 vdc->initialized |= VDC_MINOR;
1051 if (ddi_create_minor_node(vdc->dip, "h,raw", S_IFCHR,
1052 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE),
1055 vdc->instance);
1070 * to vdc.
1077 * vdc - soft state pointer
1084 vdc_create_device_nodes(vdc_t *vdc)
1092 ASSERT(vdc != NULL);
1094 instance = vdc->instance;
1095 dip = vdc->dip;
1097 switch (vdc->vdisk_type) {
1118 if (vdc->vdisk_label == VD_DISK_LABEL_EFI)
1119 status = vdc_create_device_nodes_efi(vdc);
1121 status = vdc_create_device_nodes_vtoc(vdc);
1136 vdc->initialized |= VDC_MINOR;
1160 vdc_t *vdc;
1164 vdc = ddi_get_soft_state(vdc_state, instance);
1166 if (dev == DDI_DEV_T_ANY || vdc == NULL) {
1171 mutex_enter(&vdc->lock);
1172 (void) vdc_validate_geometry(vdc);
1173 if (vdc->vdisk_label == VD_DISK_LABEL_UNK) {
1174 mutex_exit(&vdc->lock);
1178 nblocks = vdc->slice[VDCPART(dev)].nblocks;
1179 blksize = vdc->vdisk_bsize;
1180 mutex_exit(&vdc->lock);
1195 * vdc - soft state pointer
1202 vdc_is_opened(vdc_t *vdc)
1208 if (vdc->open_lyr[i] > 0)
1214 if (vdc->open[i] != 0)
1222 vdc_mark_opened(vdc_t *vdc, int slice, int flag, int otyp)
1229 ASSERT(MUTEX_HELD(&vdc->lock));
1239 if (vdc->vdisk_type == VD_DISK_TYPE_SLICE && slice != 0)
1243 if (vdc->open_excl & slicemask)
1248 if (vdc->open_lyr[slice] > 0)
1251 if (vdc->open[i] & slicemask)
1254 vdc->open_excl |= slicemask;
1259 vdc->open_lyr[slice]++;
1261 vdc->open[otyp] |= slicemask;
1268 vdc_mark_closed(vdc_t *vdc, int slice, int flag, int otyp)
1274 ASSERT(MUTEX_HELD(&vdc->lock));
1279 ASSERT(vdc->open_lyr[slice] > 0);
1280 vdc->open_lyr[slice]--;
1282 vdc->open[otyp] &= ~slicemask;
1286 vdc->open_excl &= ~slicemask;
1296 vdc_t *vdc;
1304 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
1309 DMSG(vdc, 0, "minor = %d flag = %x, otyp = %x\n",
1317 !(VD_OP_SUPPORTED(vdc->operations, VD_OP_BWRITE))) {
1321 mutex_enter(&vdc->lock);
1323 status = vdc_mark_opened(vdc, slice, flag, otyp);
1326 mutex_exit(&vdc->lock);
1335 if (vdc->vdisk_type != VD_DISK_TYPE_UNK && nodelay) {
1338 if (vdc->validate_pending > 0) {
1339 mutex_exit(&vdc->lock);
1345 (void *)vdc, TQ_NOSLEEP) == NULL) {
1346 vdc_mark_closed(vdc, slice, flag, otyp);
1347 mutex_exit(&vdc->lock);
1351 vdc->validate_pending++;
1352 mutex_exit(&vdc->lock);
1356 mutex_exit(&vdc->lock);
1358 vdc_validate(vdc);
1360 mutex_enter(&vdc->lock);
1362 if (vdc->vdisk_type == VD_DISK_TYPE_UNK ||
1363 (vdc->vdisk_type == VD_DISK_TYPE_SLICE && slice != 0) ||
1364 (!nodelay && (vdc->vdisk_label == VD_DISK_LABEL_UNK ||
1365 vdc->slice[slice].nblocks == 0))) {
1366 vdc_mark_closed(vdc, slice, flag, otyp);
1370 mutex_exit(&vdc->lock);
1383 vdc_t *vdc;
1390 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
1395 DMSG(vdc, 0, "[%d] flag = %x, otyp = %x\n", instance, flag, otyp);
1407 DMSG(vdc, 0, "[%d] flush failed with error %d on close\n",
1412 mutex_enter(&vdc->lock);
1413 vdc_mark_closed(vdc, slice, flag, otyp);
1414 mutex_exit(&vdc->lock);
1430 cmn_err(CE_NOTE, "vdc%d: %s", VDCUNIT(dev), str);
1440 vdc_t *vdc = NULL;
1443 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
1448 DMSG(vdc, 2, "[%d] dump %ld bytes at block 0x%lx : addr=0x%p\n",
1452 if ((blkno & vdc->vio_bmask) != 0) {
1453 DMSG(vdc, 0, "Misaligned block number (%lu)\n", blkno);
1456 vio_blkno = blkno >> vdc->vio_bshift;
1464 rv = vdc_do_op(vdc, VD_OP_BWRITE, addr, nbytes, VDCPART(dev),
1468 DMSG(vdc, 0, "Failed to do a disk dump (err=%d)\n", rv);
1472 DMSG(vdc, 0, "[%d] End\n", instance);
1495 vdc_t *vdc = NULL;
1500 if ((vdc = ddi_get_soft_state(vdc_state, instance)) == NULL) {
1507 DMSG(vdc, 2, "[%d] %s %ld bytes at block %llx : b_addr=0x%p\n",
1526 if ((buf->b_lblkno & vdc->vio_bmask) != 0) {
1531 vio_blkno = buf->b_lblkno >> vdc->vio_bshift;
1534 (void) vdc_do_op(vdc, op, (caddr_t)buf->b_un.b_addr,
1557 vdc_t *vdc = NULL;
1560 vdc = ddi_get_soft_state(vdc_state, instance);
1561 VERIFY(vdc != NULL);
1563 if (bufp->b_bcount > (vdc->max_xfer_sz * vdc->vdisk_bsize)) {
1564 bufp->b_bcount = vdc->max_xfer_sz * vdc->vdisk_bsize;
1619 * vdc - soft state pointer for this instance of the device driver.
1625 vdc_init_ver_negotiation(vdc_t *vdc, vio_ver_t ver)
1631 ASSERT(vdc != NULL);
1632 ASSERT(mutex_owned(&vdc->lock));
1634 DMSG(vdc, 0, "[%d] Entered.\n", vdc->instance);
1640 vdc->session_id = ((uint32_t)gettick() & 0xffffffff);
1641 DMSG(vdc, 0, "[%d] Set SID to 0x%lx\n", vdc->instance, vdc->session_id);
1646 pkt.tag.vio_sid = vdc->session_id;
1651 status = vdc_send(vdc, (caddr_t)&pkt, &msglen);
1652 DMSG(vdc, 0, "[%d] Ver info sent (status = %d)\n",
1653 vdc->instance, status);
1655 DMSG(vdc, 0, "[%d] Failed to send Ver negotiation info: "
1656 "id(%lx) rv(%d) size(%ld)", vdc->instance,
1657 vdc->curr_server->ldc_handle, status, msglen);
1715 * vdc - soft state pointer for this instance of the device driver.
1721 vdc_init_attr_negotiation(vdc_t *vdc)
1727 ASSERT(vdc != NULL);
1728 ASSERT(mutex_owned(&vdc->lock));
1730 DMSG(vdc, 0, "[%d] entered\n", vdc->instance);
1736 pkt.tag.vio_sid = vdc->session_id;
1738 pkt.max_xfer_sz = vdc->max_xfer_sz;
1739 pkt.vdisk_block_size = vdc->vdisk_bsize;
1746 status = vdc_send(vdc, (caddr_t)&pkt, &msglen);
1747 DMSG(vdc, 0, "Attr info sent (status = %d)\n", status);
1750 DMSG(vdc, 0, "[%d] Failed to send Attr negotiation info: "
1751 "id(%lx) rv(%d) size(%ld)", vdc->instance,
1752 vdc->curr_server->ldc_handle, status, msglen);
1767 * vdc - soft state pointer for this instance of the device driver.
1811 * vdc - soft state pointer for this instance of the device driver.
1817 vdc_init_dring_negotiate(vdc_t *vdc)
1825 ASSERT(vdc != NULL);
1826 ASSERT(mutex_owned(&vdc->lock));
1829 status = vdc_init_descriptor_ring(vdc);
1836 DMSG(vdc, 0, "[%d] Failed to init DRing (status = %d)\n",
1837 vdc->instance, status);
1841 DMSG(vdc, 0, "[%d] Init of descriptor ring completed (status = %d)\n",
1842 vdc->instance, status);
1848 pkt.tag.vio_sid = vdc->session_id;
1851 pkt.num_descriptors = vdc->dring_len;
1852 pkt.descriptor_size = vdc->dring_entry_size;
1854 pkt.ncookies = vdc->dring_cookie_count;
1855 pkt.cookie[0] = vdc->dring_cookie[0]; /* for now just one cookie */
1857 status = vdc_send(vdc, (caddr_t)&pkt, &msglen);
1859 DMSG(vdc, 0, "[%d] Failed to register DRing (err = %d)",
1860 vdc->instance, status);
1874 * vdc - soft state pointer for this instance of the device driver.
1919 * vdc - soft state pointer for this instance of the device driver.
1955 * vdc - soft state pointer for this instance of the device driver.
1983 * vdc - soft state pointer for this instance of the device driver.
2025 vdc_recv(vdc_t *vdc, vio_msg_t *msgp, size_t *nbytesp)
2046 * vdc->curr_server is protected by vdc->lock but to avoid
2050 * can change vdc->curr_server.
2052 status = ldc_read(vdc->curr_server->ldc_handle,
2064 DMSG(vdc, 0, "ldc_read returned %d\n", status);
2073 mutex_enter(&vdc->read_lock);
2075 while (vdc->read_state != VDC_READ_PENDING) {
2078 if (vdc->read_state == VDC_READ_RESET) {
2079 mutex_exit(&vdc->read_lock);
2083 vdc->read_state = VDC_READ_WAITING;
2084 cv_wait(&vdc->read_cv, &vdc->read_lock);
2087 vdc->read_state = VDC_READ_IDLE;
2088 mutex_exit(&vdc->read_lock);
2151 * ldc_handle - LDC handle for the channel this instance of vdc uses
2164 vdc_send(vdc_t *vdc, caddr_t pkt, size_t *msglen)
2170 ASSERT(vdc != NULL);
2171 ASSERT(mutex_owned(&vdc->lock));
2176 vdc_decode_tag(vdc, (vio_msg_t *)(uintptr_t)pkt);
2186 status = ldc_write(vdc->curr_server->ldc_handle, pkt, &size);
2196 /* if LDC had serious issues --- reset vdc state */
2198 /* LDC had serious issues --- reset vdc state */
2199 mutex_enter(&vdc->read_lock);
2200 if ((vdc->read_state == VDC_READ_WAITING) ||
2201 (vdc->read_state == VDC_READ_RESET))
2202 cv_signal(&vdc->read_cv);
2203 vdc->read_state = VDC_READ_RESET;
2204 mutex_exit(&vdc->read_lock);
2207 if (vdc->state == VDC_STATE_INIT_WAITING) {
2208 DMSG(vdc, 0, "[%d] write reset - "
2209 "vdc is resetting ..\n", vdc->instance);
2210 vdc->state = VDC_STATE_RESETTING;
2211 cv_signal(&vdc->initwait_cv);
2262 * The "cfg-handle" property of a vdc node in an MD contains the MD's
2325 DMSGX(1, "[%d] vdc inst in MD=%lx\n",
2356 * vdc - soft state pointer for this instance of the device driver.
2365 vdc_init_ports(vdc_t *vdc, md_t *mdp, mde_cookie_t vd_nodep)
2404 vdc->num_servers = 0;
2410 srvr->vdcp = vdc;
2461 if (vdc_do_ldc_init(vdc, srvr) != 0) {
2470 vdc->server_list = srvr;
2475 vdc->num_servers++;
2479 if (vdc->server_list != NULL) {
2480 vdc->curr_server = vdc->server_list;
2501 * vdc - soft state pointer for this instance of the device driver.
2509 vdc_do_ldc_up(vdc_t *vdc)
2514 ASSERT(MUTEX_HELD(&vdc->lock));
2516 DMSG(vdc, 0, "[%d] Bringing up channel %lx\n",
2517 vdc->instance, vdc->curr_server->ldc_id);
2519 if (vdc->lifecycle == VDC_LC_DETACHING)
2522 if ((status = ldc_up(vdc->curr_server->ldc_handle)) != 0) {
2525 DMSG(vdc, 0, "[%d] ldc_up(%lx,...) return %d\n",
2526 vdc->instance, vdc->curr_server->ldc_id, status);
2530 DMSG(vdc, 0, "[%d] Failed to bring up LDC: "
2531 "channel=%ld, err=%d", vdc->instance,
2532 vdc->curr_server->ldc_id, status);
2537 if (ldc_status(vdc->curr_server->ldc_handle, &ldc_state) == 0) {
2538 vdc->curr_server->ldc_state = ldc_state;
2540 DMSG(vdc, 0, "[%d] LDC channel already up\n",
2541 vdc->instance);
2542 vdc->seq_num = 1;
2543 vdc->seq_num_reply = 0;
2557 * vdc - soft state pointer for this instance of the device driver.
2558 * srvr - vdc per-server info structure
2564 vdc_terminate_ldc(vdc_t *vdc, vdc_server_t *srvr)
2566 int instance = ddi_get_instance(vdc->dip);
2569 DMSG(vdc, 0, "[%d] ldc_close()\n", instance);
2573 DMSG(vdc, 0, "[%d] ldc_unreg_callback()\n", instance);
2577 DMSG(vdc, 0, "[%d] ldc_fini()\n", instance);
2594 * vdc - soft state pointer for this instance of the device driver.
2600 vdc_fini_ports(vdc_t *vdc)
2602 int instance = ddi_get_instance(vdc->dip);
2605 ASSERT(vdc != NULL);
2606 ASSERT(mutex_owned(&vdc->lock));
2608 DMSG(vdc, 0, "[%d] initialized=%x\n", instance, vdc->initialized);
2610 srvr = vdc->server_list;
2614 vdc_terminate_ldc(vdc, srvr);
2624 vdc->server_list = NULL;
2625 vdc->num_servers = 0;
2641 * vdc - soft state pointer for this instance of the device driver.
2647 vdc_init_descriptor_ring(vdc_t *vdc)
2653 DMSG(vdc, 0, "[%d] initialized=%x\n", vdc->instance, vdc->initialized);
2655 ASSERT(vdc != NULL);
2656 ASSERT(mutex_owned(&vdc->lock));
2661 if ((vdc->initialized & VDC_DRING_INIT) == 0) {
2662 DMSG(vdc, 0, "[%d] ldc_mem_dring_create\n", vdc->instance);
2670 if ((vdc->max_xfer_sz * vdc->vdisk_bsize) < maxphys) {
2671 DMSG(vdc, 0, "[%d] using minimum DRing size\n",
2672 vdc->instance);
2673 vdc->dring_max_cookies = maxphys / PAGESIZE;
2675 vdc->dring_max_cookies =
2676 (vdc->max_xfer_sz * vdc->vdisk_bsize) / PAGESIZE;
2678 vdc->dring_entry_size = (sizeof (vd_dring_entry_t) +
2680 (vdc->dring_max_cookies - 1)));
2681 vdc->dring_len = VD_DRING_LEN;
2683 status = ldc_mem_dring_create(vdc->dring_len,
2684 vdc->dring_entry_size, &vdc->dring_hdl);
2685 if ((vdc->dring_hdl == NULL) || (status != 0)) {
2686 DMSG(vdc, 0, "[%d] Descriptor ring creation failed",
2687 vdc->instance);
2690 vdc->initialized |= VDC_DRING_INIT;
2693 if ((vdc->initialized & VDC_DRING_BOUND) == 0) {
2694 DMSG(vdc, 0, "[%d] ldc_mem_dring_bind\n", vdc->instance);
2695 vdc->dring_cookie =
2698 status = ldc_mem_dring_bind(vdc->curr_server->ldc_handle,
2699 vdc->dring_hdl,
2701 &vdc->dring_cookie[0],
2702 &vdc->dring_cookie_count);
2704 DMSG(vdc, 0, "[%d] Failed to bind descriptor ring "
2706 vdc->instance, vdc->dring_hdl,
2707 vdc->curr_server->ldc_handle, status);
2710 ASSERT(vdc->dring_cookie_count == 1);
2711 vdc->initialized |= VDC_DRING_BOUND;
2714 status = ldc_mem_dring_info(vdc->dring_hdl, &vdc->dring_mem_info);
2716 DMSG(vdc, 0,
2718 vdc->instance, vdc->dring_hdl);
2722 if ((vdc->initialized & VDC_DRING_LOCAL) == 0) {
2723 DMSG(vdc, 0, "[%d] local dring\n", vdc->instance);
2726 vdc->local_dring =
2727 kmem_zalloc(vdc->dring_len * sizeof (vdc_local_desc_t),
2729 vdc->initialized |= VDC_DRING_LOCAL;
2738 vdc->initialized |= VDC_DRING_ENTRY;
2739 for (i = 0; i < vdc->dring_len; i++) {
2740 dep = VDC_GET_DRING_ENTRY_PTR(vdc, i);
2743 status = ldc_mem_alloc_handle(vdc->curr_server->ldc_handle,
2744 &vdc->local_dring[i].desc_mhdl);
2746 DMSG(vdc, 0, "![%d] Failed to alloc mem handle for"
2747 " descriptor %d", vdc->instance, i);
2750 vdc->local_dring[i].is_free = B_TRUE;
2751 vdc->local_dring[i].dep = dep;
2755 vdc->dring_curr_idx = VDC_DRING_FIRST_ENTRY;
2767 * vdc - soft state pointer for this instance of the device driver.
2773 vdc_destroy_descriptor_ring(vdc_t *vdc)
2781 ASSERT(vdc != NULL);
2782 ASSERT(mutex_owned(&vdc->lock));
2784 DMSG(vdc, 0, "[%d] Entered\n", vdc->instance);
2786 if (vdc->initialized & VDC_DRING_ENTRY) {
2787 DMSG(vdc, 0,
2788 "[%d] Removing Local DRing entries\n", vdc->instance);
2789 for (i = 0; i < vdc->dring_len; i++) {
2790 ldep = &vdc->local_dring[i];
2797 DMSG(vdc, 0,
2818 vdc->initialized &= ~VDC_DRING_ENTRY;
2821 if (vdc->initialized & VDC_DRING_LOCAL) {
2822 DMSG(vdc, 0, "[%d] Freeing Local DRing\n", vdc->instance);
2823 kmem_free(vdc->local_dring,
2824 vdc->dring_len * sizeof (vdc_local_desc_t));
2825 vdc->initialized &= ~VDC_DRING_LOCAL;
2828 if (vdc->initialized & VDC_DRING_BOUND) {
2829 DMSG(vdc, 0, "[%d] Unbinding DRing\n", vdc->instance);
2830 status = ldc_mem_dring_unbind(vdc->dring_hdl);
2832 vdc->initialized &= ~VDC_DRING_BOUND;
2834 DMSG(vdc, 0, "[%d] Error %d unbinding DRing %lx",
2835 vdc->instance, status, vdc->dring_hdl);
2837 kmem_free(vdc->dring_cookie, sizeof (ldc_mem_cookie_t));
2840 if (vdc->initialized & VDC_DRING_INIT) {
2841 DMSG(vdc, 0, "[%d] Destroying DRing\n", vdc->instance);
2842 status = ldc_mem_dring_destroy(vdc->dring_hdl);
2844 vdc->dring_hdl = NULL;
2845 bzero(&vdc->dring_mem_info, sizeof (ldc_mem_info_t));
2846 vdc->initialized &= ~VDC_DRING_INIT;
2848 DMSG(vdc, 0, "[%d] Error %d destroying DRing (%lx)",
2849 vdc->instance, status, vdc->dring_hdl);
2947 * calls) for performance reasons - we are already holding vdc->lock
3220 * vdc - the soft state pointer
3237 vdc_do_op(vdc_t *vdc, int op, caddr_t addr, size_t nbytes, int slice,
3256 rv = vdc_send_request(vdc, op, addr, nbytes, slice, offset, bufp,
3271 rv = vdc_drain_response(vdc, bufp);
3277 rv = vdc_wait_for_response(vdc, &vio_msg);
3280 rv = vdc_process_data_msg(vdc, &vio_msg);
3290 mutex_enter(&vdc->lock);
3293 VD_KSTAT_RUNQ_BACK_TO_WAITQ(vdc);
3295 VD_KSTAT_RUNQ_EXIT(vdc);
3299 mutex_exit(&vdc->lock);
3415 * handled differently because interrupts are disabled and vdc
3422 * vdc - soft state pointer for this instance of the device driver.
3432 vdc_drain_response(vdc_t *vdc, struct buf *buf)
3441 mutex_enter(&vdc->lock);
3446 rv = ldc_read(vdc->curr_server->ldc_handle, (caddr_t)&dmsg,
3472 DMSG(vdc, 0, "discard pkt: type=%d sub=%d env=%d\n",
3496 if (idx >= vdc->dring_len) {
3497 DMSG(vdc, 0, "[%d] Bogus ack data : start %d\n",
3498 vdc->instance, idx);
3501 ldep = &vdc->local_dring[idx];
3503 DMSG(vdc, 0, "[%d] Entry @ %d - state !DONE %d\n",
3504 vdc->instance, idx, ldep->dep->hdr.dstate);
3514 rv = vdc_depopulate_descriptor(vdc, idx);
3521 if ((idx + 1) % vdc->dring_len == vdc->dring_curr_idx) {
3533 mutex_exit(&vdc->lock);
3534 DMSG(vdc, 0, "End idx=%d\n", idx);
3547 * vdc - soft state pointer for this instance of the device driver.
3554 vdc_depopulate_descriptor(vdc_t *vdc, uint_t idx)
3561 ASSERT(vdc != NULL);
3562 ASSERT(idx < vdc->dring_len);
3563 ldep = &vdc->local_dring[idx];
3565 ASSERT(MUTEX_HELD(&vdc->lock));
3567 DTRACE_PROBE2(depopulate, int, vdc->instance, vdc_local_desc_t *, ldep);
3568 DMSG(vdc, 2, ": idx = %d\n", idx);
3575 VDC_MARK_DRING_ENTRY_FREE(vdc, idx);
3579 DMSG(vdc, 2, ": is_free = %d : status = %d\n", ldep->is_free, status);
3587 cv_signal(&vdc->dring_free_cv);
3609 DMSG(vdc, 0, "?[%d] unbind mhdl 0x%lx @ idx %d failed (%d)",
3610 vdc->instance, ldep->desc_mhdl, idx, rv);
3620 cv_signal(&vdc->membind_cv);
3621 cv_signal(&vdc->dring_free_cv);
3633 * vdc - soft state pointer for this instance of the device driver.
3673 ASSERT(0); /* catch bad programming in vdc */
3760 vdc_t *vdc = srvr->vdcp;
3762 ASSERT(vdc != NULL);
3764 DMSG(vdc, 1, "evt=%lx seqID=%ld\n", event, vdc->seq_num);
3767 mutex_enter(&vdc->lock);
3769 if (vdc->curr_server != srvr) {
3770 DMSG(vdc, 0, "[%d] Ignoring event 0x%lx for port@%ld\n",
3771 vdc->instance, event, srvr->id);
3772 mutex_exit(&vdc->lock);
3785 DMSG(vdc, 0, "[%d] Received LDC_EVT_UP\n", vdc->instance);
3790 DMSG(vdc, 0, "[%d] Couldn't get LDC status %d",
3791 vdc->instance, rv);
3792 mutex_exit(&vdc->lock);
3802 vdc->seq_num = 1;
3803 vdc->seq_num_reply = 0;
3804 vdc->io_pending = B_TRUE;
3806 cv_signal(&vdc->initwait_cv);
3807 cv_signal(&vdc->io_pending_cv);
3812 DMSG(vdc, 1, "[%d] Received LDC_EVT_READ\n", vdc->instance);
3813 mutex_enter(&vdc->read_lock);
3814 cv_signal(&vdc->read_cv);
3815 vdc->read_state = VDC_READ_PENDING;
3816 mutex_exit(&vdc->read_lock);
3817 mutex_exit(&vdc->lock);
3825 DMSG(vdc, 0, "[%d] Received LDC RESET event\n", vdc->instance);
3831 mutex_enter(&vdc->read_lock);
3832 if ((vdc->read_state == VDC_READ_WAITING) ||
3833 (vdc->read_state == VDC_READ_RESET))
3834 cv_signal(&vdc->read_cv);
3835 vdc->read_state = VDC_READ_RESET;
3836 mutex_exit(&vdc->read_lock);
3839 if (vdc->state == VDC_STATE_INIT_WAITING) {
3840 vdc->state = VDC_STATE_RESETTING;
3841 cv_signal(&vdc->initwait_cv);
3842 } else if (vdc->state == VDC_STATE_FAILED) {
3843 vdc->io_pending = B_TRUE;
3844 cv_signal(&vdc->io_pending_cv);
3849 mutex_exit(&vdc->lock);
3852 DMSG(vdc, 0, "![%d] Unexpected LDC event (%lx) received",
3853 vdc->instance, event);
4418 * vdc - soft state pointer for this instance of the device driver.
4929 * be an ACK or NACK from vds[1] which vdc handles as follows.
4934 * VIO_SUBTYPE_INFO message to vdc asking it to read data; if for
4935 * some bizarre reason it does, vdc will reset the connection.
4938 * vdc - soft state pointer for this instance of the device driver.
4982 * Verify that the sequence number is what vdc expects.
5101 * vdc - soft state pointer for this instance of the device driver.
5108 vdc_handle_ver_msg(vdc_t *vdc, vio_ver_msg_t *ver_msg)
5112 ASSERT(vdc != NULL);
5113 ASSERT(mutex_owned(&vdc->lock));
5131 vdc->ver.major = ver_msg->ver_major;
5132 vdc->ver.minor = ver_msg->ver_minor;
5133 ASSERT(vdc->ver.major > 0);
5148 ASSERT(vdc->ver.major > 0);
5154 status = vdc_send(vdc, (caddr_t)ver_msg, &len);
5155 DMSG(vdc, 0, "[%d] Resend VER info (LDC status = %d)\n",
5156 vdc->instance, status);
5160 DMSG(vdc, 0, "[%d] No common version with vDisk server",
5161 vdc->instance);
5169 * (for now only vdc is the instigator)
5189 * vdc - soft state pointer for this instance of the device driver.
5196 vdc_handle_attr_msg(vdc_t *vdc, vd_attr_msg_t *attr_msg)
5201 ASSERT(vdc != NULL);
5202 ASSERT(mutex_owned(&vdc->lock));
5214 DMSG(vdc, 0, "[%d] Invalid disk size from vds",
5215 vdc->instance);
5221 DMSG(vdc, 0, "[%d] Invalid transfer size from vds",
5222 vdc->instance);
5228 DMSG(vdc, 0, "[%d] Unknown disk size from vds",
5229 vdc->instance);
5235 vdc_update_vio_bsize(vdc,
5237 DMSG(vdc, 0, "[%d] Invalid block size (%u) from vds",
5238 vdc->instance, attr_msg->vdisk_block_size);
5244 old_type = vdc->vdisk_type;
5245 vdc_update_size(vdc, attr_msg->vdisk_size,
5247 vdc->vdisk_type = attr_msg->vdisk_type;
5248 vdc->operations = attr_msg->operations;
5249 if (vio_ver_is_supported(vdc->ver, 1, 1))
5250 vdc->vdisk_media = attr_msg->vdisk_media;
5252 vdc->vdisk_media = 0;
5254 DMSG(vdc, 0, "[%d] max_xfer_sz: sent %lx acked %lx\n",
5255 vdc->instance, vdc->max_xfer_sz, attr_msg->max_xfer_sz);
5256 DMSG(vdc, 0, "[%d] vdisk_block_size: sent %lx acked %x\n",
5257 vdc->instance, vdc->vdisk_bsize,
5264 DMSG(vdc, 0, "[%d] Invalid attributes from vds",
5265 vdc->instance);
5274 vdc_create_fake_geometry(vdc);
5283 (vdc->initialized & VDC_MINOR) &&
5284 vdc->vdisk_type == VD_DISK_TYPE_SLICE) {
5285 ddi_remove_minor_node(vdc->dip, NULL);
5286 (void) devfs_clean(ddi_get_parent(vdc->dip),
5288 if (vdc_create_device_nodes(vdc) != 0) {
5289 DMSG(vdc, 0, "![%d] Failed to update "
5290 "device nodes", vdc->instance);
5307 * (for now; vdc is the only supported instigator)
5327 * vdc - soft state pointer for this instance of the driver.
5334 vdc_handle_dring_reg_msg(vdc_t *vdc, vio_dring_reg_msg_t *dring_msg)
5338 ASSERT(vdc != NULL);
5339 ASSERT(mutex_owned(&vdc->lock));
5348 vdc->dring_ident = dring_msg->dring_ident;
5349 DMSG(vdc, 0, "[%d] Received dring ident=0x%lx\n",
5350 vdc->instance, vdc->dring_ident);
5358 DMSG(vdc, 0, "[%d] server could not register DRing\n",
5359 vdc->instance);
5366 * (for now only vdc is the instigator)
5385 * to the last seq num generated by vdc).
5391 * vdc - soft state pointer for this instance of the driver.
5398 * vdc cannot deal with them
5401 vdc_verify_seq_num(vdc_t *vdc, vio_dring_msg_t *dring_msg)
5403 ASSERT(vdc != NULL);
5405 ASSERT(mutex_owned(&vdc->lock));
5411 if ((dring_msg->seq_num <= vdc->seq_num_reply) ||
5412 (dring_msg->seq_num > vdc->seq_num)) {
5413 DMSG(vdc, 0, "?[%d] Bogus sequence_number %lu: "
5415 vdc->instance, dring_msg->seq_num,
5416 vdc->seq_num_reply, vdc->seq_num,
5417 vdc->req_id_proc, vdc->req_id);
5420 vdc->seq_num_reply = dring_msg->seq_num;
5422 if (vdc->req_id_proc < vdc->req_id)
5514 vdc_t *vdc;
5533 vdc_t *vdc = NULL;
5541 vdc = dk_arg->vdc;
5542 ASSERT(vdc != NULL);
5544 rv = vdc_do_sync_op(vdc, VD_OP_FLUSH, NULL, 0,
5547 DMSG(vdc, 0, "[%d] DKIOCFLUSHWRITECACHE failed %d : model %x\n",
5548 vdc->instance, rv,
5564 mutex_enter(&vdc->lock);
5565 vdc->dkio_flush_pending--;
5566 ASSERT(vdc->dkio_flush_pending >= 0);
5567 mutex_exit(&vdc->lock);
5581 * vdc - soft state pointer
5586 vdc_dkio_gapart(vdc_t *vdc, caddr_t arg, int flag)
5596 mutex_enter(&vdc->lock);
5598 if ((rv = vdc_validate_geometry(vdc)) != 0) {
5599 mutex_exit(&vdc->lock);
5603 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) {
5604 mutex_exit(&vdc->lock);
5608 vtoc = vdc->vtoc;
5609 geom = vdc->geom;
5631 mutex_exit(&vdc->lock);
5647 * vdc - soft state pointer
5652 vdc_dkio_partition(vdc_t *vdc, caddr_t arg, int flag)
5665 VDC_EFI_DEV_SET(edev, vdc, vd_process_efi_ioctl);
5706 vdc_dioctl_rwcmd(vdc_t *vdc, caddr_t arg, int flag)
5750 auio.uio_loffset = rwcmd.blkaddr * vdc->vdisk_bsize;
5762 status = physio(vdc_strategy, buf, VD_MAKE_DEV(vdc->instance, 0),
5820 vdc_scsi_status(vdc_t *vdc, vd_scsi_t *vd_scsi, boolean_t log_error)
5837 cmn_err(CE_WARN, "%s (vdc%d):\tError for Command: 0x%x)\n",
5838 ddi_pathname(vdc->dip, path_str), vdc->instance,
5887 if (vdc->failfast_interval != 0 &&
5893 ddi_pathname(vdc->dip, path_str));
5935 vdc_uscsi_cmd(vdc_t *vdc, caddr_t arg, int mode)
5961 rv = vdc_do_sync_op(vdc, VD_OP_RESET, NULL, 0, 0, 0,
6038 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6098 rv = vdc_scsi_status(vdc, vd_scsi,
6188 vdc_mhd_inkeys(vdc_t *vdc, caddr_t arg, int mode)
6232 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6286 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6300 vdc_mhd_inresv(vdc_t *vdc, caddr_t arg, int mode)
6346 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6415 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6428 vdc_mhd_register(vdc_t *vdc, caddr_t arg, int mode)
6451 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6455 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6467 vdc_mhd_reserve(vdc_t *vdc, caddr_t arg, int mode)
6492 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6496 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6508 vdc_mhd_preemptabort(vdc_t *vdc, caddr_t arg, int mode)
6537 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6541 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6553 vdc_mhd_registerignore(vdc_t *vdc, caddr_t arg, int mode)
6576 rv = vdc_do_sync_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6580 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6590 vdc_eio_scsi_cmd(vdc_t *vdc, uchar_t scmd, int flags)
6621 rv = vdc_do_op(vdc, VD_OP_SCSICMD, (caddr_t)vd_scsi, vd_scsi_len,
6625 rv = vdc_scsi_status(vdc, vd_scsi, B_FALSE);
6641 vdc_eio_scsi_check(vdc_t *vdc, int flags)
6654 rv = vdc_eio_scsi_cmd(vdc, SCMD_TEST_UNIT_READY, flags);
6659 if (vdc->failfast_interval == 0)
6668 if (vdc_eio_scsi_cmd(vdc, SCMD_WRITE_G1, flags) != 0)
6682 vdc_eio_check(vdc_t *vdc, int flags)
6692 if (VD_OP_SUPPORTED(vdc->operations, VD_OP_SCSICMD))
6693 return (vdc_eio_scsi_check(vdc, flags));
6695 ASSERT(vdc->failfast_interval == 0);
6706 buffer = kmem_alloc(vdc->vdisk_bsize, KM_SLEEP);
6708 if (vdc->vdisk_size > 0) {
6713 blkno = blkno % vdc->vdisk_size;
6714 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer,
6715 vdc->vdisk_bsize, VD_SLICE_NONE, blkno, NULL,
6722 blkno = vdc->vdisk_size - 1;
6723 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer,
6724 vdc->vdisk_bsize, VD_SLICE_NONE, blkno, NULL,
6733 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer, vdc->vdisk_bsize,
6737 kmem_free(buffer, vdc->vdisk_bsize);
6748 vdc_eio_queue(vdc_t *vdc, int index)
6752 ASSERT(MUTEX_HELD(&vdc->lock));
6755 vio->vio_next = vdc->eio_queue;
6759 vdc->eio_queue = vio;
6762 cv_signal(&vdc->eio_cv);
6773 vdc_eio_unqueue(vdc_t *vdc, clock_t deadline, boolean_t complete_io)
6779 ASSERT(MUTEX_HELD(&vdc->lock));
6782 vio = vdc->eio_queue;
6801 vdc->eio_queue = NULL;
6815 op = vdc->local_dring[index].operation;
6816 buf = vdc->local_dring[index].buf;
6817 (void) vdc_depopulate_descriptor(vdc, index);
6820 VD_UPDATE_ERR_STATS(vdc, vd_softerrs);
6821 VD_KSTAT_RUNQ_EXIT(vdc);
6851 vdc_t *vdc = (vdc_t *)arg;
6852 clock_t starttime, timeout = drv_usectohz(vdc->failfast_interval);
6854 mutex_enter(&vdc->lock);
6856 while (vdc->failfast_interval != 0 || vdc->num_servers > 1) {
6861 if (vdc->eio_queue == NULL || vdc->state != VDC_STATE_RUNNING) {
6862 if (vdc->failfast_interval != 0) {
6864 drv_usectohz(vdc->failfast_interval);
6865 (void) cv_timedwait(&vdc->eio_cv, &vdc->lock,
6868 ASSERT(vdc->num_servers > 1);
6869 (void) cv_wait(&vdc->eio_cv, &vdc->lock);
6872 if (vdc->state != VDC_STATE_RUNNING)
6876 mutex_exit(&vdc->lock);
6881 status = vdc_eio_check(vdc, VDC_OP_STATE_RUNNING);
6883 mutex_enter(&vdc->lock);
6888 if (vdc->failfast_interval == 0 && vdc->num_servers <= 1)
6895 if (vdc->state != VDC_STATE_RUNNING || vdc->eio_queue == NULL)
6903 vdc_eio_unqueue(vdc, starttime, B_TRUE);
6905 } else if (vdc->num_servers > 1) {
6912 mutex_enter(&vdc->read_lock);
6913 vdc->read_state = VDC_READ_RESET;
6914 cv_signal(&vdc->read_cv);
6915 mutex_exit(&vdc->read_lock);
6923 vdc_eio_unqueue(vdc, starttime, B_TRUE);
6930 vdc_eio_unqueue(vdc, 0, B_TRUE);
6931 vdc->eio_thread = NULL;
6932 mutex_exit(&vdc->lock);
6940 vdc_failfast(vdc_t *vdc, caddr_t arg, int mode)
6947 mutex_enter(&vdc->lock);
6948 if (mh_time != 0 && vdc->eio_thread == NULL) {
6949 vdc->eio_thread = thread_create(NULL, 0,
6950 vdc_eio_thread, vdc, 0, &p0, TS_RUN,
6954 vdc->failfast_interval = ((long)mh_time) * MILLISEC;
6955 cv_signal(&vdc->eio_cv);
6956 mutex_exit(&vdc->lock);
6966 vdc_access_set(vdc_t *vdc, uint64_t flags)
6971 rv = vdc_do_sync_op(vdc, VD_OP_SET_ACCESS, (caddr_t)&flags,
6982 vdc_access_get(vdc_t *vdc, uint64_t *status)
6987 rv = vdc_do_sync_op(vdc, VD_OP_GET_ACCESS, (caddr_t)status,
7007 vdc_t *vdc = (vdc_t *)arg;
7011 mutex_enter(&vdc->ownership_lock);
7012 mutex_enter(&vdc->lock);
7014 while (vdc->ownership & VDC_OWNERSHIP_WANTED) {
7016 if ((vdc->ownership & VDC_OWNERSHIP_RESET) ||
7017 !(vdc->ownership & VDC_OWNERSHIP_GRANTED)) {
7024 DMSG(vdc, 0, "[%d] Ownership lost, recovering",
7025 vdc->instance);
7027 vdc->ownership &= ~(VDC_OWNERSHIP_RESET |
7030 mutex_exit(&vdc->lock);
7032 status = vdc_access_set(vdc, VD_ACCESS_SET_EXCLUSIVE |
7035 mutex_enter(&vdc->lock);
7038 DMSG(vdc, 0, "[%d] Ownership recovered",
7039 vdc->instance);
7040 vdc->ownership |= VDC_OWNERSHIP_GRANTED;
7042 DMSG(vdc, 0, "[%d] Fail to recover ownership",
7043 vdc->instance);
7053 if (vdc->ownership & VDC_OWNERSHIP_GRANTED)
7058 /* Release the ownership_lock and wait on the vdc lock */
7059 mutex_exit(&vdc->ownership_lock);
7062 (void) cv_wait(&vdc->ownership_cv, &vdc->lock);
7064 (void) cv_reltimedwait(&vdc->ownership_cv, &vdc->lock,
7067 mutex_exit(&vdc->lock);
7069 mutex_enter(&vdc->ownership_lock);
7070 mutex_enter(&vdc->lock);
7073 vdc->ownership_thread = NULL;
7074 mutex_exit(&vdc->lock);
7075 mutex_exit(&vdc->ownership_lock);
7081 vdc_ownership_update(vdc_t *vdc, int ownership_flags)
7083 ASSERT(MUTEX_HELD(&vdc->ownership_lock));
7085 mutex_enter(&vdc->lock);
7086 vdc->ownership = ownership_flags;
7087 if ((vdc->ownership & VDC_OWNERSHIP_WANTED) &&
7088 vdc->ownership_thread == NULL) {
7090 vdc->ownership_thread = thread_create(NULL, 0,
7091 vdc_ownership_thread, vdc, 0, &p0, TS_RUN,
7095 cv_signal(&vdc->ownership_cv);
7097 mutex_exit(&vdc->lock);
7104 vdc_get_capacity(vdc_t *vdc, size_t *dsk_size, size_t *blk_size)
7110 ASSERT(MUTEX_NOT_HELD(&vdc->lock));
7116 rv = vdc_do_sync_op(vdc, VD_OP_GET_CAPACITY, (caddr_t)vd_cap, alloc_len,
7133 vdc_check_capacity(vdc_t *vdc)
7143 if (!VD_OP_SUPPORTED(vdc->operations, VD_OP_GET_CAPACITY))
7146 if ((rv = vdc_get_capacity(vdc, &dsk_size, &blk_size)) != 0)
7152 mutex_enter(&vdc->lock);
7159 rv = vdc_update_vio_bsize(vdc, blk_size);
7161 vdc_update_size(vdc, dsk_size, blk_size, vdc->max_xfer_sz);
7163 mutex_exit(&vdc->lock);
7177 int (*convert)(vdc_t *vdc, void *vd_buf, void *ioctl_arg,
7233 * These particular ioctls are not sent to the server - vdc fakes up
7252 vdc_t *vdc = (vdc_t *)vdisk;
7256 dev = makedevice(ddi_driver_major(vdc->dip),
7257 VD_MAKE_DEV(vdc->instance, 0));
7288 vdc_t *vdc = NULL;
7297 vdc = ddi_get_soft_state(vdc_state, instance);
7298 if (vdc == NULL) {
7304 DMSG(vdc, 0, "[%d] Processing ioctl(%x) for dev %lx : model %x\n",
7324 DMSG(vdc, 0, "[%d] Unsupported ioctl (0x%x)\n",
7325 vdc->instance, cmd);
7362 if (vdc->cinfo == NULL)
7364 if (vdc->cinfo->dki_ctype != DKC_SCSI_CCS)
7369 if (vdc->cinfo == NULL)
7371 if (vdc->cinfo->dki_ctype != DKC_DIRECT)
7376 if (vdc->cinfo == NULL)
7381 if (vdc->minfo == NULL)
7383 if (vdc_check_capacity(vdc) != 0)
7398 return (vdc_uscsi_cmd(vdc, arg, mode));
7403 mutex_enter(&vdc->ownership_lock);
7409 vdc_ownership_update(vdc, VDC_OWNERSHIP_WANTED);
7411 rv = vdc_access_set(vdc, VD_ACCESS_SET_EXCLUSIVE |
7414 vdc_ownership_update(vdc, VDC_OWNERSHIP_WANTED |
7417 vdc_ownership_update(vdc, VDC_OWNERSHIP_NONE);
7419 mutex_exit(&vdc->ownership_lock);
7425 mutex_enter(&vdc->ownership_lock);
7426 rv = vdc_access_set(vdc, VD_ACCESS_SET_CLEAR);
7428 vdc_ownership_update(vdc, VDC_OWNERSHIP_NONE);
7430 mutex_exit(&vdc->ownership_lock);
7438 rv = vdc_access_get(vdc, &status);
7446 rv = vdc_access_set(vdc, VD_ACCESS_SET_EXCLUSIVE);
7452 return (vdc_mhd_inkeys(vdc, arg, mode));
7457 return (vdc_mhd_inresv(vdc, arg, mode));
7462 return (vdc_mhd_register(vdc, arg, mode));
7467 return (vdc_mhd_reserve(vdc, arg, mode));
7472 return (vdc_mhd_preemptabort(vdc, arg, mode));
7477 return (vdc_mhd_registerignore(vdc, arg, mode));
7482 rv = vdc_failfast(vdc, arg, mode);
7488 return (vdc_dioctl_rwcmd(vdc, arg, mode));
7493 return (vdc_dkio_gapart(vdc, arg, mode));
7498 return (vdc_dkio_partition(vdc, arg, mode));
7505 bcopy(vdc->cinfo, &cinfo, sizeof (struct dk_cinfo));
7518 ASSERT(vdc->vdisk_size != 0);
7519 ASSERT(vdc->minfo->dki_capacity != 0);
7520 rv = ddi_copyout(vdc->minfo, (void *)arg,
7534 DMSG(vdc, 1, "[%d] Flush W$: mode %x\n",
7567 mutex_enter(&vdc->lock);
7568 vdc->dkio_flush_pending++;
7569 dkarg->vdc = vdc;
7570 mutex_exit(&vdc->lock);
7577 mutex_enter(&vdc->lock);
7578 vdc->dkio_flush_pending--;
7579 mutex_exit(&vdc->lock);
7587 /* catch programming error in vdc - should be a VD_OP_XXX ioctl */
7591 if (VD_OP_SUPPORTED(vdc->operations, iop->op) == B_FALSE) {
7592 DMSG(vdc, 0, "[%d] Unsupported VD_OP operation (0x%x)\n",
7593 vdc->instance, iop->op);
7599 DMSG(vdc, 1, "[%d] struct size %ld alloc %ld\n",
7611 rv = (iop->convert)(vdc, arg, mem_p, mode, VD_COPYIN);
7613 DMSG(vdc, 0, "[%d] convert func returned %d for ioctl 0x%x\n",
7623 rv = vdc_do_sync_op(vdc, iop->op, mem_p, alloc_len,
7632 DMSG(vdc, 0, "[%d] vds returned %d for ioctl 0x%x\n",
7646 rv = (iop->convert)(vdc, mem_p, arg, mode, VD_COPYOUT);
7648 DMSG(vdc, 0, "[%d] convert func returned %d for ioctl 0x%x\n",
7669 vdc_null_copy_func(vdc_t *vdc, void *from, void *to, int mode, int dir)
7671 _NOTE(ARGUNUSED(vdc))
7681 vdc_get_wce_convert(vdc_t *vdc, void *from, void *to,
7684 _NOTE(ARGUNUSED(vdc))
7696 vdc_set_wce_convert(vdc_t *vdc, void *from, void *to,
7699 _NOTE(ARGUNUSED(vdc))
7724 * vdc - the vDisk client
7736 vdc_get_vtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
7750 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT)
7757 evtoc.timestamp[i] = vdc->vtoc->timestamp[i];
7785 * vdc - the vDisk client
7797 vdc_set_vtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
7808 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT)
7830 vdc_validate(vdc);
7836 vdc->vtoc->timestamp[i] = evtoc.timestamp[i];
7847 vdc_get_extvtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
7862 evtoc.timestamp[i] = vdc->vtoc->timestamp[i];
7873 vdc_set_extvtoc_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
7893 vdc_validate(vdc);
7899 vdc->vtoc->timestamp[i] = evtoc.timestamp[i];
7919 * vdc - the vDisk client
7931 vdc_get_geom_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
7933 _NOTE(ARGUNUSED(vdc))
7962 * vdc - the vDisk client
7974 vdc_set_geom_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
7976 _NOTE(ARGUNUSED(vdc))
8004 vdc_get_efi_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
8006 _NOTE(ARGUNUSED(vdc))
8052 vdc_set_efi_convert(vdc_t *vdc, void *from, void *to, int mode, int dir)
8054 _NOTE(ARGUNUSED(vdc))
8064 vdc_validate(vdc);
8103 * vdc - soft state pointer for this instance of the device driver.
8109 vdc_create_fake_geometry(vdc_t *vdc)
8111 ASSERT(vdc != NULL);
8112 ASSERT(vdc->max_xfer_sz != 0);
8117 if (vdc->cinfo == NULL)
8118 vdc->cinfo = kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
8120 (void) strcpy(vdc->cinfo->dki_cname, VDC_DRIVER_NAME);
8121 (void) strcpy(vdc->cinfo->dki_dname, VDC_DRIVER_NAME);
8123 vdc->cinfo->dki_maxtransfer = vdc->max_xfer_sz;
8134 switch (vdc->vdisk_media) {
8137 vdc->cinfo->dki_ctype = DKC_CDROM;
8140 if (VD_OP_SUPPORTED(vdc->operations, VD_OP_SCSICMD))
8141 vdc->cinfo->dki_ctype = DKC_SCSI_CCS;
8143 vdc->cinfo->dki_ctype = DKC_DIRECT;
8147 vdc->cinfo->dki_ctype = DKC_DIRECT;
8150 vdc->cinfo->dki_flags = DKI_FMTVOL;
8151 vdc->cinfo->dki_cnum = 0;
8152 vdc->cinfo->dki_addr = 0;
8153 vdc->cinfo->dki_space = 0;
8154 vdc->cinfo->dki_prio = 0;
8155 vdc->cinfo->dki_vec = 0;
8156 vdc->cinfo->dki_unit = vdc->instance;
8157 vdc->cinfo->dki_slave = 0;
8162 vdc->cinfo->dki_partition = 0;
8167 if (vdc->minfo == NULL)
8168 vdc->minfo = kmem_zalloc(sizeof (struct dk_minfo), KM_SLEEP);
8170 if (vio_ver_is_supported(vdc->ver, 1, 1)) {
8171 vdc->minfo->dki_media_type =
8172 VD_MEDIATYPE2DK_MEDIATYPE(vdc->vdisk_media);
8174 vdc->minfo->dki_media_type = DK_FIXED_DISK;
8177 vdc->minfo->dki_capacity = vdc->vdisk_size;
8178 vdc->minfo->dki_lbsize = vdc->vdisk_bsize;
8198 vdc_update_size(vdc_t *vdc, size_t dsk_size, size_t blk_size, size_t xfr_size)
8202 ASSERT(MUTEX_HELD(&vdc->lock));
8210 (blk_size == vdc->vdisk_bsize && dsk_size == vdc->vdisk_size &&
8211 xfr_size == vdc->max_xfer_sz))
8217 * prevent memory exhaustion in vdc if it was allocating a DRing
8222 DMSG(vdc, 0, "[%d] vds block transfer size too big;"
8223 " using max supported by vdc", vdc->instance);
8227 vdc->max_xfer_sz = xfr_size;
8228 vdc->vdisk_bsize = blk_size;
8229 vdc->vdisk_size = dsk_size;
8231 stp = (vd_err_stats_t *)vdc->err_stats->ks_data;
8234 vdc->minfo->dki_capacity = dsk_size;
8235 vdc->minfo->dki_lbsize = (uint_t)blk_size;
8240 * same as the vdisk block size which is stored in vdc->vdisk_bsize so we
8253 vdc_update_vio_bsize(vdc_t *vdc, uint32_t blk_size)
8258 vdc->vio_bmask = 0;
8259 vdc->vio_bshift = 0;
8276 vdc->vio_bshift = nshift;
8277 vdc->vio_bmask = ratio - 1;
8288 * the disk label and related information in the vdc structure. If it
8293 * vdc - soft state pointer for this instance of the device driver.
8302 vdc_validate_geometry(vdc_t *vdc)
8313 ASSERT(vdc != NULL);
8314 ASSERT(vdc->vtoc != NULL && vdc->geom != NULL);
8315 ASSERT(MUTEX_HELD(&vdc->lock));
8317 mutex_exit(&vdc->lock);
8322 (void) vdc_check_capacity(vdc);
8323 dev = makedevice(ddi_driver_major(vdc->dip),
8324 VD_MAKE_DEV(vdc->instance, 0));
8339 if (vdc->vdisk_size == 0) {
8340 mutex_enter(&vdc->lock);
8341 vdc_store_label_unk(vdc);
8345 VDC_EFI_DEV_SET(edev, vdc, vd_process_efi_ioctl);
8350 DMSG(vdc, 0, "[%d] Failed to get EFI (err=%d)",
8351 vdc->instance, rv);
8352 mutex_enter(&vdc->lock);
8353 vdc_store_label_unk(vdc);
8357 mutex_enter(&vdc->lock);
8358 vdc_store_label_efi(vdc, gpt, gpe);
8364 DMSG(vdc, 0, "[%d] Failed to get VTOC (err=%d)",
8365 vdc->instance, rv);
8366 mutex_enter(&vdc->lock);
8367 vdc_store_label_unk(vdc);
8376 mutex_enter(&vdc->lock);
8377 vdc_store_label_unk(vdc);
8390 * to the server. This will be the default if vdc is implemented
8398 if (vdc->vdisk_type == VD_DISK_TYPE_SLICE) {
8399 mutex_enter(&vdc->lock);
8401 vdc_store_label_unk(vdc);
8404 vdc_store_label_vtoc(vdc, &geom, &vtoc);
8409 mutex_enter(&vdc->lock);
8410 vdc_store_label_unk(vdc);
8419 if (vdc->vdisk_media == VD_MEDIA_CD ||
8420 vdc->vdisk_media == VD_MEDIA_DVD) {
8421 mutex_enter(&vdc->lock);
8422 vdc_store_label_vtoc(vdc, &geom, &vtoc);
8429 label = kmem_alloc(vdc->vdisk_bsize, KM_SLEEP);
8431 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)label, vdc->vdisk_bsize,
8436 DMSG(vdc, 1, "[%d] Got VTOC with invalid label\n",
8437 vdc->instance);
8438 kmem_free(label, vdc->vdisk_bsize);
8439 mutex_enter(&vdc->lock);
8440 vdc_store_label_unk(vdc);
8444 kmem_free(label, vdc->vdisk_bsize);
8445 mutex_enter(&vdc->lock);
8446 vdc_store_label_vtoc(vdc, &geom, &vtoc);
8459 * vdc - soft state pointer for this instance of the device driver.
8465 vdc_validate(vdc_t *vdc)
8471 ASSERT(!MUTEX_HELD(&vdc->lock));
8473 mutex_enter(&vdc->lock);
8476 old_label = vdc->vdisk_label;
8477 bcopy(vdc->slice, &old_slice, sizeof (vd_slice_t) * V_NUMPAR);
8480 (void) vdc_validate_geometry(vdc);
8483 if (vdc->vdisk_type == VD_DISK_TYPE_DISK &&
8484 vdc->vdisk_label != old_label) {
8486 if (vdc->vdisk_label == VD_DISK_LABEL_EFI)
8487 rv = vdc_create_device_nodes_efi(vdc);
8489 rv = vdc_create_device_nodes_vtoc(vdc);
8492 DMSG(vdc, 0, "![%d] Failed to update device nodes",
8493 vdc->instance);
8497 mutex_exit(&vdc->lock);
8503 vdc_t *vdc = (vdc_t *)arg;
8505 vdc_validate(vdc);
8507 mutex_enter(&vdc->lock);
8508 ASSERT(vdc->validate_pending > 0);
8509 vdc->validate_pending--;
8510 mutex_exit(&vdc->lock);
8524 * vdc - soft state pointer for this instance of the device driver.
8530 vdc_setup_devid(vdc_t *vdc)
8551 rv = vdc_do_op(vdc, VD_OP_GET_DEVID, (caddr_t)vd_devid,
8554 DMSG(vdc, 2, "do_op returned %d\n", rv);
8572 rv = vdc_do_sync_op(vdc, VD_OP_GET_DEVID, (caddr_t)vd_devid,
8592 DMSG(vdc, 2, ": devid length = %d\n", vd_devid->length);
8595 if (ddi_devid_init(vdc->dip, DEVID_ENCAP, vd_devid->length,
8597 DMSG(vdc, 1, "[%d] Fail to created devid\n", vdc->instance);
8608 if (vdc->devid != NULL) {
8610 if (ddi_devid_compare(vdisk_devid, vdc->devid) == 0) {
8616 vdc->instance);
8618 devid_str = ddi_devid_str_encode(vdc->devid, NULL);
8621 vdc->instance,
8630 vdc->instance,
8640 if (ddi_devid_register(vdc->dip, vdisk_devid) != DDI_SUCCESS) {
8641 DMSG(vdc, 1, "[%d] Fail to register devid\n", vdc->instance);
8646 vdc->devid = vdisk_devid;
8652 vdc_store_label_efi(vdc_t *vdc, efi_gpt_t *gpt, efi_gpe_t *gpe)
8656 ASSERT(MUTEX_HELD(&vdc->lock));
8658 vdc->vdisk_label = VD_DISK_LABEL_EFI;
8659 bzero(vdc->vtoc, sizeof (struct extvtoc));
8660 bzero(vdc->geom, sizeof (struct dk_geom));
8661 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR);
8672 vdc->slice[i].start = gpe[i].efi_gpe_StartingLBA;
8673 vdc->slice[i].nblocks = gpe[i].efi_gpe_EndingLBA -
8677 ASSERT(vdc->vdisk_size != 0);
8678 vdc->slice[VD_EFI_WD_SLICE].start = 0;
8679 vdc->slice[VD_EFI_WD_SLICE].nblocks = vdc->vdisk_size;
8684 vdc_store_label_vtoc(vdc_t *vdc, struct dk_geom *geom, struct extvtoc *vtoc)
8688 ASSERT(MUTEX_HELD(&vdc->lock));
8689 ASSERT(vdc->vdisk_bsize == vtoc->v_sectorsz);
8691 vdc->vdisk_label = VD_DISK_LABEL_VTOC;
8692 bcopy(vtoc, vdc->vtoc, sizeof (struct extvtoc));
8693 bcopy(geom, vdc->geom, sizeof (struct dk_geom));
8694 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR);
8697 vdc->slice[i].start = vtoc->v_part[i].p_start;
8698 vdc->slice[i].nblocks = vtoc->v_part[i].p_size;
8703 vdc_store_label_unk(vdc_t *vdc)
8705 ASSERT(MUTEX_HELD(&vdc->lock));
8707 vdc->vdisk_label = VD_DISK_LABEL_UNK;
8708 bzero(vdc->vtoc, sizeof (struct extvtoc));
8709 bzero(vdc->geom, sizeof (struct dk_geom));
8710 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR);