Lines Matching refs:vnd (sys/dev/vnd.c)

1 /*	$NetBSD: vnd.c,v 1.289 2023/05/19 15:42:43 mlelstv Exp $	*/
94 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.289 2023/05/19 15:42:43 mlelstv Exp $");
159 #define VND_GETXFER(vnd) pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
160 #define VND_PUTXFER(vnd, vx) pool_put(&(vnd)->sc_vxpool, (vx))
165 #define VND_MAXPENDING(vnd) ((vnd)->sc_maxactive * 4)
166 #define VND_MAXPAGES(vnd) (1024 * 1024 / PAGE_SIZE)
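The two VND_*XFER macros above recycle fixed-size struct vndxfer records through NetBSD's pool(9) allocator; the pool itself is created in the VNDIOCSET path (line 1532 below) and destroyed in vnddoclear (line 1110). The underlying pattern is a free list of equal-sized objects with allocate-on-miss. A minimal userland analogue, with hypothetical xpool_* names and malloc() standing in for the PR_WAITOK backing allocation:

#include <stdlib.h>

/* Hypothetical fixed-size object pool: items are threaded onto a free
 * list when returned and reused before falling back to malloc().
 * Assumes itemsize >= sizeof(struct xpool_item). */
struct xpool_item { struct xpool_item *next; };
struct xpool {
	struct xpool_item *freelist;
	size_t itemsize;
};

static void *
xpool_get(struct xpool *p)
{
	struct xpool_item *it = p->freelist;

	if (it != NULL) {
		p->freelist = it->next;		/* reuse a recycled item */
		return it;
	}
	return malloc(p->itemsize);		/* PR_WAITOK analogue */
}

static void
xpool_put(struct xpool *p, void *v)
{
	struct xpool_item *it = v;

	it->next = p->freelist;			/* push back for reuse */
	p->freelist = it;
}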
236 CFATTACH_DECL3_NEW(vnd, sizeof(struct vnd_softc),
482 struct vnd_softc *vnd =
488 if (vnd == NULL) {
492 lp = vnd->sc_dkdev.dk_label;
494 if ((vnd->sc_flags & VNF_INITED) == 0) {
510 if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
526 vnd->sc_size) <= 0)
529 if (bounds_check_with_label(&vnd->sc_dkdev,
530 bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
547 pp = &vnd->sc_dkdev.dk_label->d_partitions[
557 if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
561 * thread to add requests, as a wedge on vnd queues
564 if (curlwp != vnd->sc_kthread && curlwp != uvm.pagedaemon_lwp) {
565 while (vnd->sc_pending >= VND_MAXPENDING(vnd))
566 tsleep(&vnd->sc_pending, PRIBIO, "vndpc", 0);
568 vnd->sc_pending++;
569 KASSERT(vnd->sc_pending > 0);
571 bufq_put(vnd->sc_tab, bp);
572 wakeup(&vnd->sc_tab);
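Lines 557-572 are the producer half of vnd's request throttle: when the device is in VNF_USE_VN_RDWR mode, any context other than the worker thread or the pagedaemon sleeps until fewer than VND_MAXPENDING(vnd) requests are queued, then queues the buffer and wakes the worker. A userland sketch of that half, with a pthread condvar standing in for tsleep/wakeup (all names here are illustrative, not driver API):

#include <pthread.h>

#define MAXPENDING	16	/* stands in for VND_MAXPENDING(vnd) */

/* Shared state for this sketch and the two that follow. */
static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_room = PTHREAD_COND_INITIALIZER; /* "vndpc" sleep */
static pthread_cond_t q_work = PTHREAD_COND_INITIALIZER; /* "vndbp" sleep */
static int pending;		/* sc_pending analogue */

static void
enqueue(void *bp)
{
	pthread_mutex_lock(&q_mtx);
	while (pending >= MAXPENDING)		/* throttle the producer */
		pthread_cond_wait(&q_room, &q_mtx);
	pending++;
	(void)bp;	/* the real code inserts bp via bufq_put() here */
	pthread_cond_signal(&q_work);		/* wakeup(&vnd->sc_tab) */
	pthread_mutex_unlock(&q_mtx);
}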
583 vnode_has_strategy(struct vnd_softc *vnd)
585 return vnode_has_op(vnd->sc_vp, VOFFSET(vop_bmap)) &&
586 vnode_has_op(vnd->sc_vp, VOFFSET(vop_strategy));
593 vnode_has_large_blocks(struct vnd_softc *vnd)
597 iosize = vnd->sc_iosize;
598 vnd_secsize = vnd->sc_geom.vng_secsize;
610 vnode_strategy_probe(struct vnd_softc *vnd)
615 if (!vnode_has_strategy(vnd))
618 if (vnode_has_large_blocks(vnd))
625 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
626 error = VOP_BMAP(vnd->sc_vp, 0, NULL, &nbn, NULL);
627 VOP_UNLOCK(vnd->sc_vp);
640 struct vnd_softc *vnd = arg;
648 if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
649 ! vnode_has_strategy(vnd))
650 vnd->sc_flags |= VNF_USE_VN_RDWR;
653 * to access blocks as small as defined by the vnd geometry.
655 if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
656 vnode_has_large_blocks(vnd))
657 vnd->sc_flags |= VNF_USE_VN_RDWR;
661 printf("vndthread: vp %p, %s\n", vnd->sc_vp,
662 (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
668 vnd->sc_flags |= VNF_KTHREAD;
669 wakeup(&vnd->sc_kthread);
675 while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
680 obp = bufq_get(vnd->sc_tab);
682 tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
685 if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
686 KASSERT(vnd->sc_pending > 0);
687 if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
688 wakeup(&vnd->sc_pending);
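Lines 675-688 are the matching consumer half in vndthread: it sleeps on the queue head when idle and, after taking a request, wakes throttled producers only at the moment the count falls back below the limit — note the edge-triggered `sc_pending-- == VND_MAXPENDING(vnd)` test that the KASSERT on line 686 guards. Continuing the sketch above:

static void *
dequeue(void)
{
	void *bp;

	pthread_mutex_lock(&q_mtx);
	while (pending == 0)		/* tsleep(&vnd->sc_tab, ...) analogue */
		pthread_cond_wait(&q_work, &q_mtx);
	bp = NULL;	/* the real code takes bp from bufq_get(vnd->sc_tab) */
	if (pending-- == MAXPENDING)		/* edge: room just appeared */
		pthread_cond_broadcast(&q_room); /* wakeup() wakes all */
	pthread_mutex_unlock(&q_mtx);
	return bp;
}

The sketch simplifies: in the driver the pending count is only maintained in VNF_USE_VN_RDWR mode, and the queue itself lives in the bufq, not in the counter.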
696 if (vnd->sc_vp->v_mount == NULL) {
702 if ((obp->b_flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
707 vnd->sc_dkdev.dk_label->d_secsize;
719 vnx = VND_GETXFER(vnd);
721 vnx->vx_vnd = vnd;
724 while (vnd->sc_active >= vnd->sc_maxactive) {
725 tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
727 vnd->sc_active++;
731 disk_busy(&vnd->sc_dkdev);
740 bp->b_vp = vnd->sc_vp;
747 fstrans_start_lazy(vnd->sc_vp->v_mount);
750 if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
751 handle_with_strategy(vnd, obp, bp);
753 handle_with_rdwr(vnd, obp, bp);
755 fstrans_done(vnd->sc_vp->v_mount);
765 vnd->sc_flags &= ~(VNF_KTHREAD | VNF_VUNCONF);
766 wakeup(&vnd->sc_kthread);
797 * 'obp' is a pointer to the original request fed to the vnd device.
800 handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
809 offset = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
811 vp = vnd->sc_vp;
815 printf("vnd (rdwr): vp %p, %s, rawblkno 0x%" PRIx64
819 vnd->sc_dkdev.dk_label->d_secsize, offset,
828 vnd->sc_cred, &resid, NULL);
832 * Avoid caching too many pages, the vnd user
838 if (npages > VND_MAXPAGES(vnd)) {
859 * 'obp' is a pointer to the original request fed to the vnd device.
862 handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
875 bn = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
877 bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
895 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
896 error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
897 VOP_UNLOCK(vnd->sc_vp);
926 " sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
938 (long) (vnd-vnd_softc), &nbp->vb_buf,
977 struct vnd_softc *vnd = vnx->vx_vnd;
983 KASSERT(vnd->sc_active > 0);
990 disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
992 vnd->sc_active--;
993 if (vnd->sc_active == 0) {
994 wakeup(&vnd->sc_tab);
1001 VND_PUTXFER(vnd, vnx);
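vndiodone (lines 977-1001 above) retires a transfer: it charges the disk statistics via disk_unbusy, returns the vndxfer to the pool, and wakes sleepers on the queue head once sc_active drains to zero — both vndthread's sc_maxactive throttle (line 724) and teardown wait on that. The same drain idiom in the sketch's terms:

static int active;	/* sc_active analogue, protected by q_mtx */

static void
xfer_done(void)
{
	pthread_mutex_lock(&q_mtx);
	active--;
	if (active == 0)			/* last in-flight request */
		pthread_cond_broadcast(&q_work); /* wakeup(&vnd->sc_tab) */
	pthread_mutex_unlock(&q_mtx);
}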
1053 struct vnd_softc *vnd;
1060 vnd = device_lookup_private(&vnd_cd, *un);
1061 if (vnd == NULL)
1064 if ((vnd->sc_flags & VNF_INITED) == 0)
1067 vn_lock(vnd->sc_vp, LK_SHARED | LK_RETRY);
1068 error = VOP_GETATTR(vnd->sc_vp, va, l->l_cred);
1069 VOP_UNLOCK(vnd->sc_vp);
1074 vnddoclear(struct vnd_softc *vnd, int pmask, int minor, bool force)
1078 if ((error = vndlock(vnd)) != 0)
1086 if (DK_BUSY(vnd, pmask) && !force) {
1087 vndunlock(vnd);
1092 dkwedge_delall(&vnd->sc_dkdev);
1101 vnd->sc_flags |= VNF_CLEARING;
1102 vndunlock(vnd);
1103 vndclear(vnd, minor);
1110 pool_destroy(&vnd->sc_vxpool);
1113 disk_detach(&vnd->sc_dkdev);
1147 struct vnd_softc *vnd;
1205 vnd = device_lookup_private(&vnd_cd, unit);
1206 if (vnd == NULL)
1236 if (vnd->sc_flags & VNF_INITED)
1241 if ((vnd->sc_flags & VNF_INITED) == 0)
1246 error = disk_ioctl(&vnd->sc_dkdev, dev, cmd, data, flag, l);
1253 if ((error = vndlock(vnd)) != 0)
1260 vnd->sc_flags |= VNF_USE_VN_RDWR;
1276 vnd->sc_flags |= VNF_USE_VN_RDWR;
1313 vnd->sc_comp_blksz = be32toh(ch->block_size);
1315 vnd->sc_comp_numoffs = be32toh(ch->num_blocks) + 1;
1317 if (!DK_DEV_BSIZE_OK(vnd->sc_comp_blksz)) {
1322 KASSERT(0 < vnd->sc_comp_blksz);
1323 KASSERT(0 < vnd->sc_comp_numoffs);
1331 if (SIZE_MAX/sizeof(uint64_t) < vnd->sc_comp_numoffs) {
1339 sizeof(uint64_t)*vnd->sc_comp_numoffs) ||
1340 (UQUAD_MAX/vnd->sc_comp_blksz <
1341 vnd->sc_comp_numoffs - 1)) {
1348 KASSERT(vnd->sc_comp_numoffs - 1 <=
1349 UQUAD_MAX/vnd->sc_comp_blksz);
1351 ((u_quad_t)vnd->sc_comp_numoffs - 1) *
1352 (u_quad_t)vnd->sc_comp_blksz;
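Lines 1313-1352 show VNDIOCSET distrusting a compressed image's header: the block size must satisfy DK_DEV_BSIZE_OK, the offset-table allocation `sizeof(uint64_t) * numoffs` is checked against SIZE_MAX, and the decompressed size `(numoffs - 1) * blksz` is checked against UQUAD_MAX before either product is formed. A standalone rendering of those overflow guards (UQUAD_MAX is the kernel's name for UINT64_MAX):

#include <stdbool.h>
#include <stdint.h>

static bool
comp_header_ok(uint32_t blksz, uint32_t numoffs, uint64_t *decompsize)
{
	if (blksz == 0 || numoffs == 0)
		return false;
	/* would the offset-table allocation overflow size_t? */
	if (SIZE_MAX / sizeof(uint64_t) < numoffs)
		return false;
	/* would the decompressed size overflow uint64_t? */
	if (UINT64_MAX / blksz < (uint64_t)numoffs - 1)
		return false;
	*decompsize = ((uint64_t)numoffs - 1) * (uint64_t)blksz;
	return true;
}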
1356 vnd->sc_comp_offsets =
1357 malloc(sizeof(uint64_t) * vnd->sc_comp_numoffs,
1362 (void *)vnd->sc_comp_offsets,
1363 sizeof(uint64_t) * vnd->sc_comp_numoffs,
1375 for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
1376 vnd->sc_comp_offsets[i] =
1377 be64toh(vnd->sc_comp_offsets[i]);
1379 be64toh(vnd->sc_comp_offsets[i + 1])
1380 - vnd->sc_comp_offsets[i];
1384 vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
1385 be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs
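The offset table is stored big-endian on disk; lines 1375-1385 convert it in place, and because the entries are cumulative file offsets, the compressed length of block i is simply offsets[i+1] - offsets[i] (the driver also tracks the largest such length to size sc_comp_buff). A sketch of the conversion, assuming be64toh from <sys/endian.h> (<endian.h> on glibc systems):

#include <sys/endian.h>		/* be64toh; use <endian.h> on glibc */
#include <stddef.h>
#include <stdint.h>

/* Convert a big-endian offset table in place and report the largest
 * compressed block length, which sizes the decompression input buffer. */
static void
comp_convert_offsets(uint64_t *offs, uint32_t numoffs, size_t *maxlen)
{
	uint32_t i;
	size_t len;

	*maxlen = 0;
	for (i = 0; i < numoffs - 1; i++) {
		offs[i] = be64toh(offs[i]);
		len = (size_t)(be64toh(offs[i + 1]) - offs[i]);
		if (len > *maxlen)
			*maxlen = len;
	}
	offs[numoffs - 1] = be64toh(offs[numoffs - 1]);
}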
1389 vnd->sc_comp_buff = malloc(comp_maxsize,
1393 vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
1395 vnd->sc_comp_buffblk = -1;
1398 memset(&vnd->sc_comp_stream, 0, sizeof(z_stream));
1399 vnd->sc_comp_stream.zalloc = vnd_alloc;
1400 vnd->sc_comp_stream.zfree = vnd_free;
1401 error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
1403 if (vnd->sc_comp_stream.msg)
1404 printf("vnd%d: compressed file, %s\n",
1405 unit, vnd->sc_comp_stream.msg);
1411 vnd->sc_flags |= VNF_COMP | VNF_READONLY;
1420 vnd->sc_vp = vp;
1421 vnd->sc_size = btodb(vattr.va_size); /* note truncation */
1426 error = bdev_ioctl(vattr.va_fsid, DIOCGSECTORSIZE, &vnd->sc_iosize, FKIOCTL, l);
1428 vnd->sc_iosize = vnd->sc_vp->v_mount->mnt_stat.f_frsize;
1431 if (vnd->sc_iosize == 0)
1432 vnd->sc_iosize = DEV_BSIZE;
1440 memcpy(&vnd->sc_geom, &vio->vnd_geom,
1446 if (!DK_DEV_BSIZE_OK(vnd->sc_geom.vng_secsize) ||
1447 vnd->sc_geom.vng_ntracks == 0 ||
1448 vnd->sc_geom.vng_nsectors == 0) {
1456 if (vnd->sc_geom.vng_ncylinders == 0)
1457 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (
1458 (vnd->sc_geom.vng_secsize / DEV_BSIZE) *
1459 vnd->sc_geom.vng_ntracks *
1460 vnd->sc_geom.vng_nsectors);
1466 geomsize = (int64_t)vnd->sc_geom.vng_nsectors *
1467 vnd->sc_geom.vng_ntracks *
1468 vnd->sc_geom.vng_ncylinders *
1469 (vnd->sc_geom.vng_secsize / DEV_BSIZE);
1475 if (vnd->sc_size < geomsize) {
1479 } else if (vnd->sc_size >= (32 * 64)) {
1484 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1485 vnd->sc_geom.vng_nsectors = 32;
1486 vnd->sc_geom.vng_ntracks = 64;
1487 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1489 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1490 vnd->sc_geom.vng_nsectors = 1;
1491 vnd->sc_geom.vng_ntracks = 1;
1492 vnd->sc_geom.vng_ncylinders = vnd->sc_size;
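Lines 1456-1492 handle geometry defaults: a supplied geometry that describes more sectors than the file holds is rejected (line 1475), while with no geometry an image of at least 32 * 64 DEV_BSIZE blocks is presented as a fake 32-sector, 64-track disk and anything smaller collapses to a 1-sector, 1-track layout. The fallback as a standalone function (field names follow struct vndgeom; DEV_BSIZE is 512 on NetBSD):

#include <stdint.h>

#define DEV_BSIZE	512

struct fake_geom {
	uint32_t secsize, nsectors, ntracks, ncylinders;
};

/* size is in DEV_BSIZE units, matching vnd->sc_size. */
static void
fabricate_geometry(uint64_t size, struct fake_geom *g)
{
	g->secsize = DEV_BSIZE;
	if (size >= 32 * 64) {		/* big enough for the fake layout */
		g->nsectors = 32;
		g->ntracks = 64;
		g->ncylinders = size / (64 * 32);
	} else {			/* tiny image: degenerate geometry */
		g->nsectors = 1;
		g->ntracks = 1;
		g->ncylinders = size;
	}
}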
1495 vnd_set_geometry(vnd);
1498 vnd->sc_flags |= VNF_READONLY;
1501 if ((error = vndsetcred(vnd, l->l_cred)) != 0)
1504 vndthrottle(vnd, vnd->sc_vp);
1505 vio->vnd_osize = dbtob(vnd->sc_size);
1507 vio->vnd_size = dbtob(vnd->sc_size);
1508 vnd->sc_flags |= VNF_INITED;
1511 error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd,
1512 &vnd->sc_kthread, "%s", device_xname(vnd->sc_dev));
1515 while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1516 tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1521 vnd->sc_vp, (unsigned long) vnd->sc_size,
1522 vnd->sc_geom.vng_secsize,
1523 vnd->sc_geom.vng_nsectors,
1524 vnd->sc_geom.vng_ntracks,
1525 vnd->sc_geom.vng_ncylinders);
1529 disk_attach(&vnd->sc_dkdev);
1532 pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1535 vndunlock(vnd);
1540 dkwedge_discover(&vnd->sc_dkdev);
1550 if (vnd->sc_comp_offsets) {
1551 free(vnd->sc_comp_offsets, M_DEVBUF);
1552 vnd->sc_comp_offsets = NULL;
1554 if (vnd->sc_comp_buff) {
1555 free(vnd->sc_comp_buff, M_DEVBUF);
1556 vnd->sc_comp_buff = NULL;
1558 if (vnd->sc_comp_decombuf) {
1559 free(vnd->sc_comp_decombuf, M_DEVBUF);
1560 vnd->sc_comp_decombuf = NULL;
1563 vndunlock(vnd);
1572 if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0)
1587 if ((error = vndlock(vnd)) != 0)
1590 vnd->sc_flags |= VNF_LABELLING;
1601 error = setdisklabel(vnd->sc_dkdev.dk_label,
1602 lp, 0, vnd->sc_dkdev.dk_cpulabel);
1610 vndstrategy, vnd->sc_dkdev.dk_label,
1611 vnd->sc_dkdev.dk_cpulabel);
1614 vnd->sc_flags &= ~VNF_LABELLING;
1616 vndunlock(vnd);
1625 vnd->sc_flags |= VNF_KLABEL;
1627 vnd->sc_flags &= ~VNF_KLABEL;
1632 vnd->sc_flags |= VNF_WLABEL;
1634 vnd->sc_flags &= ~VNF_WLABEL;
1638 vndgetdefaultlabel(vnd, (struct disklabel *)data);
1643 vndgetdefaultlabel(vnd, &newlabel);
1656 bufq_getstrategyname(vnd->sc_tab),
1668 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1669 error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred,
1671 VOP_UNLOCK(vnd->sc_vp);
1688 vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
1695 vnd->sc_cred = kauth_cred_dup(cred);
1700 aiov.iov_len = uimin(DEV_BSIZE, dbtob(vnd->sc_size));
1707 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1708 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1711 * Because vnd does all IO directly through the vnode
1717 error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1720 VOP_UNLOCK(vnd->sc_vp);
1730 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1734 vnd->sc_maxactive = 2;
1736 vnd->sc_maxactive = 8;
1738 if (vnd->sc_maxactive < 1)
1739 vnd->sc_maxactive = 1;
1746 struct vnd_softc *vnd;
1748 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
1749 if (vnd->sc_flags & VNF_INITED)
1750 vndclear(vnd);
1755 vndclear(struct vnd_softc *vnd, int myminor)
1757 struct vnode *vp = vnd->sc_vp;
1764 printf("vndclear(%p): vp %p\n", vnd, vp);
1772 mn = DISKMINOR(device_unit(vnd->sc_dev), i);
1779 if ((vnd->sc_flags & VNF_READONLY) == 0)
1783 bufq_drain(vnd->sc_tab);
1786 vnd->sc_flags |= VNF_VUNCONF;
1787 wakeup(&vnd->sc_tab);
1788 while (vnd->sc_flags & VNF_KTHREAD)
1789 tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
1793 if (vnd->sc_flags & VNF_COMP) {
1794 if (vnd->sc_comp_offsets) {
1795 free(vnd->sc_comp_offsets, M_DEVBUF);
1796 vnd->sc_comp_offsets = NULL;
1798 if (vnd->sc_comp_buff) {
1799 free(vnd->sc_comp_buff, M_DEVBUF);
1800 vnd->sc_comp_buff = NULL;
1802 if (vnd->sc_comp_decombuf) {
1803 free(vnd->sc_comp_decombuf, M_DEVBUF);
1804 vnd->sc_comp_decombuf = NULL;
1808 vnd->sc_flags &=
1813 (void) vn_close(vp, fflags, vnd->sc_cred);
1814 kauth_cred_free(vnd->sc_cred);
1815 vnd->sc_vp = NULL;
1816 vnd->sc_cred = NULL;
1817 vnd->sc_size = 0;
1884 strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1903 * Read the disklabel from a vnd. If one is not present, create a fake one.
1995 struct vnd_softc *vnd =
2017 comp_block = bn / (off_t)vnd->sc_comp_blksz;
2020 if (comp_block >= vnd->sc_comp_numoffs) {
2027 if (comp_block != vnd->sc_comp_buffblk) {
2028 length = vnd->sc_comp_offsets[comp_block + 1] -
2029 vnd->sc_comp_offsets[comp_block];
2030 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
2031 error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
2032 length, vnd->sc_comp_offsets[comp_block],
2033 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vnd->sc_cred,
2037 VOP_UNLOCK(vnd->sc_vp);
2042 vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
2043 vnd->sc_comp_stream.avail_in = length;
2044 vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
2045 vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
2046 inflateReset(&vnd->sc_comp_stream);
2047 error = inflate(&vnd->sc_comp_stream, Z_FINISH);
2049 if (vnd->sc_comp_stream.msg)
2050 aprint_normal_dev(vnd->sc_dev,
2052 vnd->sc_comp_stream.msg);
2054 VOP_UNLOCK(vnd->sc_vp);
2058 vnd->sc_comp_buffblk = comp_block;
2059 VOP_UNLOCK(vnd->sc_vp);
2063 offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
2064 length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
2073 error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
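Lines 2017-2073 are the heart of VNF_COMP reads: the request's block number is mapped to a compressed block index, the block is re-inflated only when it differs from the single cached block (sc_comp_buffblk), and the requested byte range is then copied out of the decompression buffer. A self-contained userland sketch of the same one-block cache against zlib — the file descriptor, pread, and the ctx layout are assumptions of this sketch, while the inflateReset/inflate(Z_FINISH) sequence mirrors the driver (the z_stream is set up once with inflateInit2(&zs, MAX_WBITS), as on line 1401):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <zlib.h>

/* One-block decompression cache, after sc_comp_buffblk/sc_comp_decombuf. */
struct comp_ctx {
	int fd;			/* compressed image file */
	uint64_t *offs;		/* converted offset table */
	uint32_t blksz, numoffs;
	unsigned char *cbuf;	/* compressed input, maxlen bytes */
	unsigned char *dbuf;	/* decompressed block, blksz bytes */
	int64_t cached;		/* block index held in dbuf, -1 if none */
	z_stream zs;		/* set up with inflateInit2(&zs, MAX_WBITS) */
};

static int
comp_read(struct comp_ctx *c, unsigned char *dst, uint64_t bn, size_t len)
{
	uint64_t blk = bn / c->blksz;
	size_t off = (size_t)(bn % c->blksz);
	size_t clen;

	/* simplification: reject ranges that cross a block boundary */
	if (blk >= c->numoffs - 1 || off + len > c->blksz)
		return -1;
	if ((int64_t)blk != c->cached) {	/* cache miss: re-inflate */
		clen = (size_t)(c->offs[blk + 1] - c->offs[blk]);
		if (pread(c->fd, c->cbuf, clen, (off_t)c->offs[blk]) !=
		    (ssize_t)clen)
			return -1;
		inflateReset(&c->zs);
		c->zs.next_in = c->cbuf;
		c->zs.avail_in = clen;
		c->zs.next_out = c->dbuf;
		c->zs.avail_out = c->blksz;
		if (inflate(&c->zs, Z_FINISH) != Z_STREAM_END)
			return -1;
		c->cached = (int64_t)blk;
	}
	memcpy(dst, c->dbuf + off, len);	/* uiomove() analogue */
	return 0;
}

The driver instead loops uiomove over successive blocks until the request's resid is exhausted, so a single read may trigger several cached inflations.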
2103 vnd_set_geometry(struct vnd_softc *vnd)
2105 struct disk_geom *dg = &vnd->sc_dkdev.dk_geom;
2110 spb = vnd->sc_geom.vng_secsize / DEV_BSIZE;
2111 dg->dg_secperunit = vnd->sc_size / spb;
2112 dg->dg_secsize = vnd->sc_geom.vng_secsize;
2113 dg->dg_nsectors = vnd->sc_geom.vng_nsectors;
2114 dg->dg_ntracks = vnd->sc_geom.vng_ntracks;
2115 dg->dg_ncylinders = vnd->sc_geom.vng_ncylinders;
2123 disk_set_info(vnd->sc_dev, &vnd->sc_dkdev, NULL);
2132 MODULE(MODULE_CLASS_DRIVER, vnd, VND_DEPENDS);
2137 CFDRIVER_DECL(vnd, DV_DISK, NULL);
2151 error = devsw_attach("vnd", &vnd_bdevsw, &vnd_bmajor,