genfs_io.c revision 1.53
/*	$NetBSD: genfs_io.c,v 1.53 2011/10/31 12:49:32 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53 2011/10/31 12:49:32 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

static void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
}

static void
genfs_markdirty(struct vnode *vp)
{
	struct genfs_node * const gp = VTOG(vp);

	KASSERT(mutex_owned(vp->v_interlock));
	gp->g_dirtygen++;
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}
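/*
 * For orientation (an illustrative sketch, not part of this file's
 * interfaces): filesystems reach genfs_getpages() below via the
 * VOP_GETPAGES() vnode operation, typically from the UBC/fault paths.
 * A caller locks the object and asks for a window of busy pages,
 * roughly:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	mutex_enter(vp->v_uobj.vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 *
 * On success the pages come back PG_BUSY and the object lock has been
 * dropped by the callee.
 */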
/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	kauth_cred_t const cred = curlwp->l_cred;	/* XXXUBC curlwp */
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
	bool has_trans_wapbl = false;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
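	/*
	 * A worked example of the window arithmetic above (illustrative
	 * values: PAGE_SIZE 4KB, fs_bsize 8KB): a request for one page at
	 * origoffset 0x3000 is widened to the containing filesystem block,
	 * giving startoffset = trunc_blk(0x3000) = 0x2000 and
	 * endoffset = round_page(round_blk(0x4000)) = 0x4000.  The window
	 * [0x2000, 0x4000) holds two pages, and the requested page lives
	 * at index ridx = (0x3000 - 0x2000) >> PAGE_SHIFT = 1 of the page
	 * array filled in below.
	 */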
	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	if (!has_trans_wapbl) {
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				goto out_err_free;
			}
		}
		has_trans_wapbl = true;
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(uobj->vmobjlock);

    {
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
			    kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}
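	/*
	 * A worked example of the tail-zeroing loop above (4KB pages):
	 * if the window is 4 pages and diskeof falls 9728 bytes in, then
	 * bytes = 9728 and tailbytes = 6656.  The first pass zeroes the
	 * remaining 2560 bytes of page 2 (len = 4096 - (9728 & PAGE_MASK)),
	 * the second zeroes all of page 3, in each case only if the page
	 * is PG_FAKE and so holds no valid cached data.
	 */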
	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}
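		/*
		 * For illustration (hypothetical numbers): with 8KB blocks
		 * (fs_bshift 13) and offset 0x5000, lbn is 2.  If VOP_BMAP()
		 * returns blkno 1000 with run 3, blocks 2..5 are contiguous
		 * on disk, so the MIN() above allows up to
		 * ((2 + 1 + 3) << 13) - 0x5000 = 0x7000 bytes in one i/o.
		 * Since 0x5000 is 0x1000 bytes into block 2, the b_blkno
		 * computation below adds 0x1000 >> dev_bshift (8 sectors at
		 * DEV_BSHIFT 9) to the device block number.
		 */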
		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < holepages; i++) {
				if (memwrite) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			mutex_exit(uobj->vmobjlock);
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		error = 0;
		goto out_err_free;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pg,0,0,0);
			}
			mutex_exit(uobj->vmobjlock);
		}
	}
	if (!glocked) {
		genfs_node_unlock(vp);
	}

	putiobuf(mbp);
    }

	mutex_enter(uobj->vmobjlock);
	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			if (pg == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pg, pg->flags, 0,0);
			if (pg->flags & PG_FAKE) {
				pg->flags |= PG_RELEASED;
			}
		}
		mutex_enter(&uvm_pageqlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	error = 0;
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	mutex_exit(&uvm_pageqlock);
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (has_trans_wapbl) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *      thus, a caller might want to unlock higher level resources
 *      (e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *      that new pages are inserted on the tail end of the list.  thus,
 *      we can make a complete pass through the object in one go by starting
 *      at the head and working towards the tail (new pages are put in
 *      front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *      must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *      this routine is holding the lock on the object.  the only time
 *      that it can run into a PG_BUSY page that it does not own is if
 *      some other process has started I/O on the page (e.g. either
 *      a pagein, or a pageout).  if the PG_BUSY page is being paged
 *      in, then it can not be dirty (!PG_CLEAN) because no one has
 *      had a chance to modify it yet.  if the PG_BUSY page is being
 *      paged out then it means that someone else has already started
 *      cleaning the page for us (how nice!).  in this case, if we
 *      have syncio specified, then after we make our pass through the
 *      object we need to wait for the other PG_BUSY pages to clear
 *      off (i.e. we need to do an iosync).  also note that once a
 *      page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *      we can traverse the pages in an object either by going down the
 *      linked list in "uobj->memq", or we can go over the address range
 *      by page doing hash table lookups for each address.  depending
 *      on how many pages are in the object it may be cheaper to do one
 *      or the other.  we set "by_list" to true if we are using memq.
 *      if the cost of a hash lookup were equal to the cost of the list
 *      traversal we could compare the number of pages in the start->stop
 *      range to the total number of pages in the object.  however, it
 *      seems that a hash table lookup is more expensive than the linked
 *      list traversal, so we multiply the number of pages in the
 *      range by an estimate of the relatively higher cost of the hash lookup.
 */
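/*
 * Example (an illustrative sketch of a typical caller): to flush and
 * free every page of a vnode synchronously, vinvalbuf-style code would
 * do something like
 *
 *	mutex_enter(vp->v_uobj.vmobjlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 *
 * taking the object lock itself and relying on genfs_putpages() to
 * drop it on every return path, as described above.
 */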
int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = uobj->vmobjlock;
	off_t off;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
#define maxpages (MAXPHYS >> PAGE_SHIFT)
	int i, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	bool wasclean, by_list, needs_clean, yld;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	struct genfs_node * const gp = VTOG(vp);
	int flags;
	int dirtygen;
	bool modified;
	bool need_wapbl;
	bool has_trans;
	bool cleanall;
	bool onworklst;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	has_trans = false;
	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (origflags & PGO_JOURNALLOCKED) == 0);

retry:
	modified = false;
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (has_trans) {
			if (need_wapbl)
				WAPBL_END(vp->v_mount);
			fstrans_done(vp->v_mount);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
		mutex_exit(slock);
		if (pagedaemon) {
			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
			if (error)
				return error;
		} else
			fstrans_start(vp->v_mount, FSTRANS_LAZY);
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				return error;
			}
		}
		has_trans = true;
		mutex_enter(slock);
		goto retry;
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
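	/*
	 * A worked example of the heuristic above: with UVM_PAGE_TREE_PENALTY
	 * at 4 and a 1MB range (256 pages), list traversal is chosen only if
	 * the object holds at most 1024 pages; beyond that, per-page lookups
	 * over the range are expected to be cheaper than walking memq.
	 */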
	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
#if !defined(DEBUG)
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
#endif /* !defined(DEBUG) */
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_iflag & VI_ONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.flags = PG_MARKER;
		endmp.flags = PG_MARKER;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj ||
		    (pg->flags & PG_MARKER) != 0);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->flags & PG_MARKER) {
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = false;
				}
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = false;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
			}
			if (yld) {
				mutex_exit(slock);
				preempt();
				mutex_enter(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				mutex_enter(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}
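		/*
		 * Cluster layout, by example (illustrative values: MAXPHYS
		 * 64KB and 4KB pages, so maxpages is 16): if three dirty
		 * pages immediately precede the page of interest, the
		 * backward scan sets nback = 3 and the memmove above leaves
		 * them at pgs[0..2]; the page itself is plugged in at
		 * pgs[nback] = pgs[3]; and the forward scan may fill
		 * pgs[4..15] with up to maxpages - nback - 1 = 12 more
		 * dirty pages.
		 */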
		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_enter(&uvm_pageqlock);
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq.queue);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_exit(&uvm_pageqlock);
		}
		if (needs_clean) {
			modified = true;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq.queue);
			}
			mutex_exit(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			mutex_enter(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq.queue);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}

	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_iflag & VI_ONWORKLST) != 0) {
#if defined(DEBUG)
		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
			if ((pg->flags & (PG_FAKE | PG_MARKER)) != 0) {
				continue;
			}
			if ((pg->flags & PG_CLEAN) == 0) {
				printf("%s: %p: !CLEAN\n", __func__, pg);
			}
			if (pmap_is_modified(pg)) {
				printf("%s: %p: modified\n", __func__, pg);
			}
		}
#endif /* defined(DEBUG) */
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */

	/* Wait for output to complete. */
	if (!wasclean && !async && vp->v_numoutput != 0) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, slock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(slock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, we need to make sure the
		 * vnode ends up clean.  retrying is not a big deal because,
		 * in many cases, uobj->uo_npages is already 0 here.
		 */
		mutex_enter(slock);
		goto retry;
	}

	if (has_trans) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}

	return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}

int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool iowrite = rw == UIO_WRITE;
	const int brw = iowrite ? B_WRITE : B_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
	    vp, kva, len, flags);

	KASSERT(vp->v_size <= vp->v_writesize);
	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	startoffset = off;
	bytes = MIN(len, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	if (iowrite) {
		mutex_enter(vp->v_interlock);
		vp->v_numoutput += 2;
		mutex_exit(vp->v_interlock);
	}
	mbp = getiobuf(vp, true);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = len;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY | BC_AGE;
	if (async) {
		mbp->b_flags = brw | B_ASYNC;
		mbp->b_iodone = iodone;
	} else {
		mbp->b_flags = brw;
		mbp->b_iodone = NULL;
	}
	if (curlwp == uvm.pagedaemon_lwp)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		struct vnode *devvp;

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			if (!iowrite) {
				memset((char *)kva + (offset - startoffset), 0,
				    iobytes);
			}
			skipbytes += iobytes;
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	s = splbio();
	(*iodone)(mbp);
	splx(s);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));

		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		return error;
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		mutex_exit(uobj->vmobjlock);
		return EINVAL;
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	mutex_exit(uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(&uvm_pageqlock);
	if (error == 0 && memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	return error;
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	struct buf *bp;
	vaddr_t kva;
	int error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	mutex_enter(vp->v_interlock);
	vp->v_numoutput++;
	mutex_exit(vp->v_interlock);

	bp = getiobuf(vp, true);
	bp->b_cflags = BC_BUSY | BC_AGE;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	bp->b_error = error;
	uvm_aio_aiodone(bp);
	return (error);
}
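/*
 * Example (an illustrative sketch of a typical caller): a filesystem
 * read/write path that supports direct I/O would try it first and let
 * the remainder of the request fall through to the buffered path,
 * along the lines of
 *
 *	if ((ioflag & IO_DIRECT) != 0)
 *		genfs_directio(vp, uio, ioflag);
 *	if (uio->uio_resid == 0)
 *		goto out;
 *	... buffered i/o for whatever is left ...
 */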
/*
 * Process a uio using direct I/O.  If we reach a part of the request
 * which cannot be processed in this fashion for some reason, just return.
 * The caller must handle some additional part of the request using
 * buffered I/O before trying direct I/O again.
 */

void
genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct vmspace *vs;
	struct iovec *iov;
	vaddr_t va;
	size_t len;
	const int mask = DEV_BSIZE - 1;
	int error;
	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (ioflag & IO_JOURNALLOCKED) == 0);

	/*
	 * We only support direct I/O to user space for now.
	 */

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		return;
	}

	/*
	 * If the vnode is mapped, we would need to get the getpages lock
	 * to stabilize the bmap, but then we would get into trouble while
	 * locking the pages if the pages belong to this same vnode (or a
	 * multi-vnode cascade to the same effect).  Just fall back to
	 * buffered I/O if the vnode is mapped to avoid this mess.
	 */

	if (vp->v_vflag & VV_MAPPED) {
		return;
	}

	if (need_wapbl) {
		error = WAPBL_BEGIN(vp->v_mount);
		if (error)
			return;
	}

	/*
	 * Do as much of the uio as possible with direct I/O.
	 */

	vs = uio->uio_vmspace;
	while (uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		va = (vaddr_t)iov->iov_base;
		len = MIN(iov->iov_len, genfs_maxdio);
		len &= ~mask;

		/*
		 * If the next chunk is smaller than DEV_BSIZE or extends past
		 * the current EOF, then fall back to buffered I/O.
		 */

		if (len == 0 || uio->uio_offset + len > vp->v_size) {
			break;
		}

		/*
		 * Check alignment.  The file offset must be at least
		 * sector-aligned.  The exact constraint on memory alignment
		 * is very hardware-dependent, but requiring sector-aligned
		 * addresses there too is safe.
		 */

		if (uio->uio_offset & mask || va & mask) {
			break;
		}
		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
		    uio->uio_rw);
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + len;
		iov->iov_len -= len;
		uio->uio_offset += len;
		uio->uio_resid -= len;
	}

	if (need_wapbl)
		WAPBL_END(vp->v_mount);
}
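/*
 * A worked example of the trimming above (DEV_BSIZE 512, mask 511):
 * a 6000-byte iovec at a 512-byte-aligned user address with a
 * sector-aligned uio_offset is trimmed to len = 6000 & ~511 = 5632
 * bytes of direct I/O; the trailing 368 bytes are left in the uio for
 * the caller's buffered path.  If the offset or address had not been
 * sector-aligned, the loop would have bailed out instead.
 */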
/*
 * Iodone routine for direct I/O.  We don't do much here since the request is
 * always synchronous, so the caller will do most of the work after biowait().
 */

static void
genfs_dio_iodone(struct buf *bp)
{

	KASSERT((bp->b_flags & B_ASYNC) == 0);
	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * Process one chunk of a direct I/O request.
 */

static int
genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
    off_t off, enum uio_rw rw)
{
	struct vm_map *map;
	struct pmap *upm, *kpm;
	size_t klen = round_page(uva + len) - trunc_page(uva);
	off_t spoff, epoff;
	vaddr_t kva, puva;
	paddr_t pa;
	vm_prot_t prot;
	int error, rv, poff, koff;
	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
	    (rw == UIO_WRITE ? PGO_FREE : 0);

	/*
	 * For writes, verify that this range of the file already has fully
	 * allocated backing store.  If there are any holes, just punt and
	 * make the caller take the buffered write path.
	 */

	if (rw == UIO_WRITE) {
		daddr_t lbn, elbn, blkno;
		int bsize, bshift, run;

		bshift = vp->v_mount->mnt_fs_bshift;
		bsize = 1 << bshift;
		lbn = off >> bshift;
		elbn = (off + len + bsize - 1) >> bshift;
		while (lbn < elbn) {
			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
			if (error) {
				return error;
			}
			if (blkno == (daddr_t)-1) {
				return ENOSPC;
			}
			lbn += 1 + run;
		}
	}

	/*
	 * Flush any cached pages for parts of the file that we're about to
	 * access.  If we're writing, invalidate pages as well.
	 */

	spoff = trunc_page(off);
	epoff = round_page(off + len);
	mutex_enter(vp->v_interlock);
	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
	if (error) {
		return error;
	}

	/*
	 * Wire the user pages and remap them into kernel memory.
	 */

	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	error = uvm_vslock(vs, (void *)uva, len, prot);
	if (error) {
		return error;
	}

	map = &vs->vm_map;
	upm = vm_map_pmap(map);
	kpm = vm_map_pmap(kernel_map);
	puva = trunc_page(uva);
	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
		rv = pmap_extract(upm, puva + poff, &pa);
		KASSERT(rv);
		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
	}
	pmap_update(kpm);

	/*
	 * Do the I/O.
	 */

	koff = uva - trunc_page(uva);
	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
	    genfs_dio_iodone);

	/*
	 * Tear down the kernel mapping.
	 */

	pmap_kremove(kva, klen);
	pmap_update(kpm);
	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);

	/*
	 * Unwire the user pages.
	 */

	uvm_vsunlock(vs, (void *)uva, len);
	return error;
}