/*	$NetBSD: genfs_io.c,v 1.104 2024/04/05 13:05:40 riastradh Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.104 2024/04/05 13:05:40 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>
#include <sys/atomic.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_page_array.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
    off_t, bool, bool, bool, bool);
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);

int genfs_maxdio = MAXPHYS;

static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_owner_locked_p(pg, true));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_page_unbusy(pgs, npages);
}
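
/*
 * Illustrative caller sketch (not part of this file): a pager typically
 * reaches the getpages routine below through VOP_GETPAGES() with the
 * object locked, much as uvn_get() does.  The locals "uobj", "pgs",
 * "npages", "centeridx", "access_type" and "advice" here are
 * hypothetical; for non-PGO_LOCKED requests the object lock is
 * released by the callee before it returns.
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = VOP_GETPAGES(vp, offset, pgs, &npages, centeridx,
 *	    access_type, advice, PGO_SYNCIO);
 *	// on return, vmobjlock has been dropped
 */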

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages, iflag;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool need_wapbl = (vp->v_mount->mnt_wapbl &&
	    (flags & PGO_JOURNALLOCKED) == 0);
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	bool holds_wapbl = false;
	struct mount *trans_mount = NULL;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
	    (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(memwrite >= overwrite);
	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

	/*
	 * the object must be locked.  it can only be a read lock when
	 * processing a read fault with PGO_LOCKED.
	 */

	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT(rw_write_held(uobj->vmobjlock) ||
	    ((flags & PGO_LOCKED) != 0 && !memwrite));

#ifdef DIAGNOSTIC
	if ((flags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	/*
	 * check for reclaimed vnode.  v_interlock is not held here, but
	 * VI_DEADCHECK is set with vmobjlock held.
	 */

	iflag = atomic_load_relaxed(&vp->v_iflag);
	if (__predict_false((iflag & VI_DEADCHECK) != 0)) {
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, VDEAD_NOWAIT);
		mutex_exit(vp->v_interlock);
		if (error) {
			if ((flags & PGO_LOCKED) == 0)
				rw_exit(uobj->vmobjlock);
			return error;
		}
	}

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0);
	KASSERT(origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			rw_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%jx count %jd goes past EOF 0x%jx",
		    origoffset, *ap->a_count, memeof, 0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, NULL,
		    UFP_NOWAIT | UFP_NOALLOC | UFP_NOBUSY |
		    (memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		/*
		 * lock and unlock g_glock to ensure that no one is truncating
		 * the file behind us.
		 */
		if (!genfs_node_rdtrylock(vp)) {
			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];
				if (pg == NULL || pg == PGO_DONTCARE) {
					continue;
				}
				if (uvm_pagegetdirty(pg) ==
				    UVM_PAGE_STATUS_CLEAN) {
					uvm_pagemarkdirty(pg,
					    UVM_PAGE_STATUS_UNKNOWN);
				}
			}
		}
		goto out_err;
	}
	rw_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
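
	/*
	 * Worked example (illustrative, with assumed sizes): for 4 KB
	 * pages and an 8 KB filesystem block size, a request for one
	 * page at origoffset 0x3000 gives startoffset =
	 * trunc_blk(0x3000) = 0x2000, endoffset = round_blk(0x4000) =
	 * 0x4000, and ridx = (0x3000 - 0x2000) >> PAGE_SHIFT = 1, i.e.
	 * the requested page sits at index 1 of a two-page,
	 * block-aligned pgs array.
	 */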

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %jd npages %jd startoff %#jx endoff %#jx",
	    ridx, npages, startoffset, endoffset);

	if (trans_mount == NULL) {
		trans_mount = vp->v_mount;
		fstrans_start(trans_mount);
		/*
		 * check if this vnode is still valid.
		 */
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, 0);
		mutex_exit(vp->v_interlock);
		if (error)
			goto out_err_free;
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (blockalloc && need_wapbl) {
			error = WAPBL_BEGIN(trans_mount);
			if (error)
				goto out_err_free;
			holds_wapbl = true;
		}
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		rw_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE", 0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			/*
			 * it's the caller's responsibility to allocate blocks
			 * beforehand for the overwrite case.
			 */

			KASSERT((pg->flags & PG_RDONLY) == 0 || !blockalloc);
			pg->flags &= ~PG_RDONLY;

			/*
			 * mark the page DIRTY.
			 * otherwise another thread can do putpages and pull
			 * our vnode from syncer's queue before our caller does
			 * ubc_release.  note that putpages won't see CLEAN
			 * pages even if they are BUSY.
			 */

			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY) != 0)) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%jx end 0x%jx",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			rw_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	rw_exit(uobj->vmobjlock);
	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
	    async, memwrite, blockalloc, glocked);
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	if (error == 0 && async)
		goto out_err_free;
	rw_enter(uobj->vmobjlock, RW_WRITER);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		rw_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %jd", error, 0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages, 0,0,0);
	error = 0;
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %#jx flags 0x%jx",
		    (uintptr_t)pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			/*
			 * we've read page's contents from the backing storage.
			 *
			 * for a read fault, we keep them CLEAN; if we
			 * encountered a hole while reading, the pages may
			 * already have been dirtied with zeros.
			 */
			KASSERTMSG(blockalloc || uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			pg->flags &= ~PG_FAKE;
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %#jx offset 0x%jx",
			    (uintptr_t)pg, pg->offset, 0,0);
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pagelock(pg);
			uvm_pageenqueue(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
			pg->flags &= ~(PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		} else if (memwrite && !overwrite &&
		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * for a write fault, start dirtiness tracking of
			 * requested pages.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
		}
	}
	rw_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (trans_mount != NULL) {
		if (holds_wapbl)
			WAPBL_END(trans_mount);
		fstrans_done(trans_mount);
	}
	return error;
}

/*
 * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
 *
 * "glocked" (which is currently not actually used) tells us not whether
 * the genfs_node is locked on entry (it always is) but whether it was
 * locked on entry to genfs_getpages.
 */
static int
genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    off_t startoffset, off_t diskeof,
    bool async, bool memwrite, bool blockalloc, bool glocked)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	kauth_cred_t const cred = curlwp->l_cred;	/* XXXUBC curlwp */
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0)
		return EBUSY;

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags |= BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_aiodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %#jx 0x%jx 0x%jx",
			    (uintptr_t)kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%jx",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd",
			    lbn, error, 0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}
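
		/*
		 * Worked example (illustrative, with assumed sizes): with
		 * fs_bshift = 13 (8 KB blocks) and dev_bshift = 9 (512-byte
		 * device blocks), VOP_BMAP of lbn 2 might return blkno 1000
		 * and run 3, meaning lbns 2..5 are contiguous on disk.  The
		 * largest single transfer starting at file offset 0x4000 is
		 * then ((2 + 1 + 3) << 13) - 0x4000 = 0x8000 bytes, and a
		 * transfer starting 0x600 bytes into the block would begin
		 * at device block 1000 + (0x600 >> 9) = 1003.
		 */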

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%jx -> HOLE", lbn, 0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			if (!blockalloc) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				for (i = 0; i < holepages; i++) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
				rw_exit(uobj->vmobjlock);
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%x bcount 0x%x blkno 0x%x",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole, then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%jx/0x%jx -> %jd",
		    startoffset, npages << PAGE_SHIFT, error, 0);
		if (!error) {
			rw_enter(uobj->vmobjlock, RW_WRITER);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~PG_RDONLY;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				UVMHIST_LOG(ubchist, "mark dirty pg %#jx",
				    (uintptr_t)pg, 0, 0, 0);
			}
			rw_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
	return error;
}
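
/*
 * Illustrative caller sketch (not part of this file): a synchronous
 * flush of a whole file through the putpages routine below, e.g. from
 * an fsync-like path.  The flag combination is one hypothetical choice;
 * the object lock is consumed by the callee.
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 *	// vmobjlock has been released on return
 */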

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *    object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *    thus, a caller might want to unlock higher level resources
 *    (e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
 *	one has had a chance to modify it yet.  if the PG_BUSY page is
 *	being paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	krwlock_t * const slock = uobj->vmobjlock;
	off_t nextoff;
	int i, error, npages, nback;
	int freeflag;
	/*
	 * This array is larger than it should be, so that its size is
	 * constant.  The right size is MAXPAGES.
	 */
	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
#define	MAXPAGES	(MAXPHYS / PAGE_SIZE)
	struct vm_page *pg, *tpg;
	struct uvm_page_array a;
	bool wasclean, needs_clean;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct mount *trans_mp;
	int flags;
	bool modified;		/* if we write out any pages */
	bool holds_wapbl;
	bool cleanall;		/* try to pull off from the syncer's list */
	bool onworklst;
	bool nodirty;
	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0);
	KASSERT((endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);
	KASSERT(rw_write_held(slock));

	UVMHIST_LOG(ubchist, "vp %#jx pages %jd off 0x%jx len 0x%jx",
	    (uintptr_t)vp, uobj->uo_npages, startoff, endoff - startoff);

#ifdef DIAGNOSTIC
	if ((origflags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	trans_mp = NULL;
	holds_wapbl = false;

retry:
	modified = false;
	flags = origflags;

	/*
	 * shortcut if we have no pages to process.
	 */

	nodirty = uvm_obj_clean_p(uobj);
#ifdef DIAGNOSTIC
	mutex_enter(vp->v_interlock);
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
	mutex_exit(vp->v_interlock);
#endif
	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
		mutex_enter(vp->v_interlock);
		if (vp->v_iflag & VI_ONWORKLST && LIST_EMPTY(&vp->v_dirtyblkhd)) {
			vn_syncer_remove_from_worklist(vp);
		}
		mutex_exit(vp->v_interlock);
		if (trans_mp) {
			if (holds_wapbl)
				WAPBL_END(trans_mp);
			fstrans_done(trans_mp);
		}
		rw_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
		if (pagedaemon) {
			/* Pagedaemon must not sleep here. */
			trans_mp = vp->v_mount;
			error = fstrans_start_nowait(trans_mp);
			if (error) {
				rw_exit(slock);
				return error;
			}
		} else {
			/*
			 * Cannot use vdeadcheck() here as this operation
			 * usually gets used from VOP_RECLAIM().  Test for
			 * change of v_mount instead and retry on change.
			 */
			rw_exit(slock);
			trans_mp = vp->v_mount;
			fstrans_start(trans_mp);
			if (vp->v_mount != trans_mp) {
				fstrans_done(trans_mp);
				trans_mp = NULL;
			} else {
				holds_wapbl = (trans_mp->mnt_wapbl &&
				    (origflags & PGO_JOURNALLOCKED) == 0);
				if (holds_wapbl) {
					error = WAPBL_BEGIN(trans_mp);
					if (error) {
						fstrans_done(trans_mp);
						return error;
					}
				}
			}
			rw_enter(slock, RW_WRITER);
			goto retry;
		}
	}

	error = 0;
	wasclean = uvm_obj_nowriteback_p(uobj);
	nextoff = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}

	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if (nodirty) {
		/* We handled the dirtyonly && nodirty case above. */
		KASSERT(!dirtyonly);
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop to scan pages.
	 */

	cleanall = true;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	uvm_page_array_init(&a, uobj, dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
	    (!async ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
	for (;;) {
		bool pgprotected;

		/*
		 * if !dirtyonly, iterate over all resident pages in the range.
		 *
		 * if dirtyonly, only possibly dirty pages are interesting.
		 * however, if we are asked to sync for integrity, we should
		 * wait on pages being written back by other threads as well.
		 */

		pg = uvm_page_array_fill_and_peek(&a, nextoff, 0);
		if (pg == NULL) {
			break;
		}

		KASSERT(pg->uobject == uobj);
		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY)) != 0);
		KASSERT(pg->offset >= startoff);
		KASSERT(pg->offset >= nextoff);
		KASSERT(!dirtyonly ||
		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
		    uvm_obj_page_writeback_p(pg));

		if (pg->offset >= endoff) {
			break;
		}

		/*
		 * a preempt point.
		 */

		if (preempt_needed()) {
			nextoff = pg->offset; /* visit this page again */
			rw_exit(slock);
			preempt();
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			rw_enter(slock, RW_WRITER);
			continue;
		}

		/*
		 * if the current page is busy, wait for it to become unbusy.
		 */

		if ((pg->flags & PG_BUSY) != 0) {
			UVMHIST_LOG(ubchist, "busy %#jx", (uintptr_t)pg,
			    0, 0, 0);
			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
			    && (flags & PGO_BUSYFAIL) != 0) {
				UVMHIST_LOG(ubchist, "busyfail %#jx",
				    (uintptr_t)pg, 0, 0, 0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			/*
			 * don't bother to wait on other's activities
			 * unless we are asked to sync for integrity.
			 */
			if (!async && (flags & PGO_RECLAIM) == 0) {
				wasclean = false;
				nextoff = pg->offset + PAGE_SIZE;
				uvm_page_array_advance(&a);
				continue;
			}
			nextoff = pg->offset; /* visit this page again */
			uvm_pagewait(pg, slock, "genput");
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			rw_enter(slock, RW_WRITER);
			continue;
		}

		nextoff = pg->offset + PAGE_SIZE;
		uvm_page_array_advance(&a);

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		pgprotected = false;
		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
			pgprotected = true;
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
					pgprotected = true;
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = uvm_pagecheckdirty(pg, pgprotected);
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * let the fs constrain the offset range of the cluster.
			 * we additionally constrain the range here such that
			 * it fits in the "pgs" pages array.
			 */

			off_t fslo, fshi, genlo, lo, off = pg->offset;
			GOP_PUTRANGE(vp, off, &fslo, &fshi);
			KASSERT(fslo == trunc_page(fslo));
			KASSERT(fslo <= off);
			KASSERT(fshi == trunc_page(fshi));
			KASSERT(fshi == 0 || off < fshi);

			if (off > MAXPHYS / 2)
				genlo = trunc_page(off - (MAXPHYS / 2));
			else
				genlo = 0;
			lo = MAX(fslo, genlo);

			/*
			 * first look backward.
			 */

			npages = (off - lo) >> PAGE_SHIFT;
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback,
			    &pgs[0], NULL,
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}
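
			/*
			 * Worked example (illustrative): with npages = 4
			 * and nback = 2, the backward scan above left the
			 * two dirty pages it found at pgs[2] (off - 2
			 * pages) and pgs[3] (off - 1 page).  The memmove
			 * shifts them down to pgs[0] and pgs[1], in
			 * ascending offset order, and the memset clears
			 * the vacated tail entries, so the page of
			 * interest lands at pgs[nback] just below.
			 */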

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 *
			 * pass our cached array of pages so that hopefully
			 * uvn_findpages can find some good pages in it.
			 * the array a was filled above with one of the
			 * following sets of flags:
			 *	0
			 *	UVM_PAGE_ARRAY_FILL_DIRTY
			 *	UVM_PAGE_ARRAY_FILL_DIRTY|WRITEBACK
			 *
			 * XXX this is fragile but it'll work: the array
			 * was earlier filled sparsely, but UFP_DIRTYONLY
			 * implies dense.  see corresponding comment in
			 * uvn_findpages().
			 */

			npages = MAXPAGES - nback - 1;
			if (fshi)
				npages = MIN(npages,
				    (fshi - off - 1) >> PAGE_SHIFT);
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1], &a,
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			KASSERT(i == 0 ||
			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
			    UVM_PAGE_STATUS_DIRTY);
			if (needs_clean) {
				/*
				 * mark pages as WRITEBACK so that concurrent
				 * fsync can find and wait for our activities.
				 */
				uvm_obj_page_set_writeback(pgs[i]);
			}
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagelock(tpg);
				uvm_pagedeactivate(tpg);
				uvm_pageunlock(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagelock(tpg);
						uvm_pagedequeue(tpg);
						uvm_pageunlock(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					KASSERT(npages == 1);
					KASSERT(!needs_clean);
					KASSERT(pg == tpg);
					KASSERT(nextoff ==
					    tpg->offset + PAGE_SIZE);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (needs_clean) {
			modified = true;
			KASSERT(nextoff == pg->offset + PAGE_SIZE);
			KASSERT(nback < npages);
			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
			KASSERT(pgs[nback] == pg);
			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);

			/*
			 * start the i/o.
			 */
			rw_exit(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			rw_enter(slock, RW_WRITER);
			if (error) {
				break;
			}
		}
	}
	uvm_page_array_fini(&a);

	/*
	 * update ctime/mtime if the modification we started writing out might
	 * be from mmap'ed write.
	 *
	 * this is necessary when an application keeps a file mmaped and
	 * repeatedly modifies it via the window.  note that, because we
	 * don't always write-protect pages when cleaning, such modifications
	 * might not involve any page faults.
	 */

	mutex_enter(vp->v_interlock);
	if (modified && (vp->v_iflag & VI_WRMAP) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we no longer have any possibly dirty pages, take us off the
	 * syncer list.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) != 0 && uvm_obj_clean_p(uobj) &&
	    LIST_EMPTY(&vp->v_dirtyblkhd)) {
		vn_syncer_remove_from_worklist(vp);
	}

	/* Wait for output to complete. */
	rw_exit(slock);
	if (!wasclean && !async && vp->v_numoutput != 0) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, vp->v_interlock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(vp->v_interlock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
		 * retrying is not a big deal because, in many cases,
		 * uobj->uo_npages is already 0 here.
		 */
		rw_enter(slock, RW_WRITER);
		goto retry;
	}

	if (trans_mp) {
		if (holds_wapbl)
			WAPBL_END(trans_mp);
		fstrans_done(trans_mp);
	}

	return (error);
}

/*
 * Default putrange method for file systems that do not care
 * how many pages are given to one GOP_WRITE() call.
 */
void
genfs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
{

	*lop = 0;
	*hip = 0;
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_aiodone);

	return error;
}

/*
 * genfs_gop_write_rwmap:
 *
 * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
 * this maps pages with PROT_WRITE so that VOP_STRATEGY can modify
 * the contents before writing it out to the underlying storage.
 */

int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_aiodone);

	return error;
}
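
/*
 * Illustrative sketch (not part of this file): a file system that wants
 * each GOP_WRITE() cluster confined to a single fs block could supply a
 * putrange method like the hypothetical one below instead of the
 * unrestricted default above, assuming its block size is a multiple of
 * the page size (the asserts in genfs_do_putpages() require the bounds
 * to be page-aligned).
 *
 *	static void
 *	examplefs_gop_putrange(struct vnode *vp, off_t off,
 *	    off_t *lop, off_t *hip)
 *	{
 *		const int bshift = vp->v_mount->mnt_fs_bshift;
 *
 *		*lop = (off >> bshift) << bshift;
 *		*hip = *lop + (1 << bshift);
 *	}
 */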

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool lazy = (flags & PGO_LAZY) != 0;
	const bool iowrite = rw == UIO_WRITE;
	const int brw = iowrite ? B_WRITE : B_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx kva %#jx len 0x%jx flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)kva, len, flags);

	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERTMSG(vp->v_size <= vp->v_writesize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize);
	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	startoffset = off;
	bytes = MIN(len, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	if (iowrite) {
		/*
		 * why += 2?
		 * 1 for biodone, 1 for uvm_aio_aiodone.
		 */
		mutex_enter(vp->v_interlock);
		vp->v_numoutput += 2;
		mutex_exit(vp->v_interlock);
	}
	mbp = getiobuf(vp, true);
	UVMHIST_LOG(ubchist, "vp %#jx mbp %#jx num now %jd bytes 0x%jx",
	    (uintptr_t)vp, (uintptr_t)mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = len;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags |= BC_BUSY | BC_AGE;
	if (async) {
		mbp->b_flags = brw | B_ASYNC;
		mbp->b_iodone = iodone;
	} else {
		mbp->b_flags = brw;
		mbp->b_iodone = NULL;
	}
	if (curlwp == uvm.pagedaemon_lwp)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async || lazy)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		struct vnode *devvp;

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd",
			    lbn, error, 0, 0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);

		/*
		 * if this block isn't allocated, just skip it (for writes)
		 * or zero it (for reads) instead of doing device i/o.
		 */

		if (blkno == (daddr_t)-1) {
			if (!iowrite) {
				memset((char *)kva + (offset - startoffset), 0,
				    iobytes);
			}
			skipbytes += iobytes;
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %jd", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %#jx", (uintptr_t)mbp, 0, 0, 0);
	error = biowait(mbp);
	s = splbio();
	(*iodone)(mbp);
	splx(s);
	UVMHIST_LOG(ubchist, "returning, error %jd", error, 0, 0, 0);
	return (error);
}
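
/*
 * Minimal sketch of the master/sub-buffer pattern used above
 * (illustrative; "total", "offset_within_total" and "piece_bytes" are
 * hypothetical).  The master buffer covers the whole transfer; each
 * physically contiguous piece gets a nested sub-buffer, and
 * nestiobuf_done() accounts for the bytes that were skipped rather
 * than sent to the device:
 *
 *	mbp = getiobuf(vp, true);
 *	mbp->b_bufsize = total;
 *	mbp->b_resid = mbp->b_bcount = total;
 *	...
 *	bp = getiobuf(vp, true);
 *	nestiobuf_setup(mbp, bp, offset_within_total, piece_bytes);
 *	VOP_STRATEGY(devvp, bp);	// biodone(bp) credits mbp
 *	...
 *	nestiobuf_done(mbp, skipbytes, error);
 *	error = biowait(mbp);		// waits for all pieces
 */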

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
		    UFP_NOWAIT | UFP_NOALLOC |
		    (memwrite ? UFP_NORDONLY : 0));

		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
		return error;
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		rw_exit(uobj->vmobjlock);
		return EINVAL;
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		rw_exit(uobj->vmobjlock);
		return 0;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
	rw_exit(uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
			uvm_pagelock(pg);
			uvm_pageactivate(pg);
			uvm_pageunlock(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	rw_exit(uobj->vmobjlock);
	return error;
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	struct buf *bp;
	vaddr_t kva;
	int error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	mutex_enter(vp->v_interlock);
	vp->v_numoutput++;
	mutex_exit(vp->v_interlock);

	bp = getiobuf(vp, true);
	bp->b_cflags |= BC_BUSY | BC_AGE;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	bp->b_error = error;
	uvm_aio_aiodone(bp);
	return (error);
}
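
/*
 * Illustrative caller sketch (not part of this file): a file system's
 * read/write path typically tries direct I/O first and falls back to
 * buffered I/O for whatever genfs_directio() leaves in the uio,
 * roughly:
 *
 *	if ((ioflag & IO_DIRECT) != 0) {
 *		genfs_directio(vp, uio, ioflag);
 *		if (uio->uio_resid == 0)
 *			return 0;
 *	}
 *	// continue with the buffered path for the remainder
 *
 * The surrounding locking and the exact condition vary per file system.
 */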

/*
 * Process a uio using direct I/O.  If we reach a part of the request
 * which cannot be processed in this fashion for some reason, just return.
 * The caller must handle some additional part of the request using
 * buffered I/O before trying direct I/O again.
 */

void
genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct vmspace *vs;
	struct iovec *iov;
	vaddr_t va;
	size_t len;
	const int mask = DEV_BSIZE - 1;
	int error;
	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (ioflag & IO_JOURNALLOCKED) == 0);

#ifdef DIAGNOSTIC
	if ((ioflag & IO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	/*
	 * We only support direct I/O to user space for now.
	 */

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		return;
	}

	/*
	 * If the vnode is mapped, we would need to get the getpages lock
	 * to stabilize the bmap, but then we would get into trouble while
	 * locking the pages if the pages belong to this same vnode (or a
	 * multi-vnode cascade to the same effect).  Just fall back to
	 * buffered I/O if the vnode is mapped to avoid this mess.
	 */

	if (vp->v_vflag & VV_MAPPED) {
		return;
	}

	if (need_wapbl) {
		error = WAPBL_BEGIN(vp->v_mount);
		if (error)
			return;
	}

	/*
	 * Do as much of the uio as possible with direct I/O.
	 */

	vs = uio->uio_vmspace;
	while (uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		va = (vaddr_t)iov->iov_base;
		len = MIN(iov->iov_len, genfs_maxdio);
		len &= ~mask;

		/*
		 * If the next chunk is smaller than DEV_BSIZE or extends past
		 * the current EOF, then fall back to buffered I/O.
		 */

		if (len == 0 || uio->uio_offset + len > vp->v_size) {
			break;
		}

		/*
		 * Check alignment.  The file offset must be at least
		 * sector-aligned.  The exact constraint on memory alignment
		 * is very hardware-dependent, but requiring sector-aligned
		 * addresses there too is safe.
		 */

		if (uio->uio_offset & mask || va & mask) {
			break;
		}
		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
		    uio->uio_rw);
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + len;
		iov->iov_len -= len;
		uio->uio_offset += len;
		uio->uio_resid -= len;
	}

	if (need_wapbl)
		WAPBL_END(vp->v_mount);
}

/*
 * Iodone routine for direct I/O.  We don't do much here since the request is
 * always synchronous, so the caller will do most of the work after biowait().
 */

static void
genfs_dio_iodone(struct buf *bp)
{

	KASSERT((bp->b_flags & B_ASYNC) == 0);
	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}
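
/*
 * Worked example for the alignment checks above (illustrative): with
 * DEV_BSIZE = 512, mask = 0x1ff.  A 64 KB chunk at file offset 8192
 * with a sector-aligned user buffer passes all checks; a request at
 * file offset 1000 (1000 & 0x1ff != 0), or one whose remaining length
 * rounds down to 0, breaks out of the loop and is completed by
 * buffered I/O instead.
 */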

/*
 * Process one chunk of a direct I/O request.
 */

static int
genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
    off_t off, enum uio_rw rw)
{
	struct vm_map *map;
	struct pmap *upm, *kpm __unused;
	size_t klen = round_page(uva + len) - trunc_page(uva);
	off_t spoff, epoff;
	vaddr_t kva, puva;
	paddr_t pa;
	vm_prot_t prot;
	int error, rv __diagused, poff, koff;
	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
	    (rw == UIO_WRITE ? PGO_FREE : 0);

	/*
	 * For writes, verify that this range of the file already has fully
	 * allocated backing store.  If there are any holes, just punt and
	 * make the caller take the buffered write path.
	 */

	if (rw == UIO_WRITE) {
		daddr_t lbn, elbn, blkno;
		int bsize, bshift, run;

		bshift = vp->v_mount->mnt_fs_bshift;
		bsize = 1 << bshift;
		lbn = off >> bshift;
		elbn = (off + len + bsize - 1) >> bshift;
		while (lbn < elbn) {
			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
			if (error) {
				return error;
			}
			if (blkno == (daddr_t)-1) {
				return ENOSPC;
			}
			lbn += 1 + run;
		}
	}

	/*
	 * Flush any cached pages for parts of the file that we're about to
	 * access.  If we're writing, invalidate pages as well.
	 */

	spoff = trunc_page(off);
	epoff = round_page(off + len);
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
	if (error) {
		return error;
	}

	/*
	 * Wire the user pages and remap them into kernel memory.
	 */

	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	error = uvm_vslock(vs, (void *)uva, len, prot);
	if (error) {
		return error;
	}

	map = &vs->vm_map;
	upm = vm_map_pmap(map);
	kpm = vm_map_pmap(kernel_map);
	puva = trunc_page(uva);
	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
		rv = pmap_extract(upm, puva + poff, &pa);
		KASSERT(rv);
		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
	}
	pmap_update(kpm);

	/*
	 * Do the I/O.
	 */

	koff = uva - trunc_page(uva);
	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
	    genfs_dio_iodone);

	/*
	 * Tear down the kernel mapping.
	 */

	pmap_kremove(kva, klen);
	pmap_update(kpm);
	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);

	/*
	 * Unwire the user pages.
	 */

	uvm_vsunlock(vs, (void *)uva, len);
	return error;
}