Deleted: vfs_bio.c (46181)                          Added: vfs_bio.c (46349)
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 * John S. Dyson.
13 *
1/*
2 * Copyright (c) 1994,1997 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Absolutely no warranty of function or purpose is made by the author
12 * John S. Dyson.
13 *
14 * $Id: vfs_bio.c,v 1.206 1999/04/14 18:51:52 dt Exp $
14 * $Id: vfs_bio.c,v 1.207 1999/04/29 18:15:25 alc Exp $
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme. Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *

--- 46 unchanged lines hidden (view full) ---

69
70struct buf *buf; /* buffer header pool */
71struct swqueue bswlist;
72
73static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
74 vm_offset_t to);
75static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
76 vm_offset_t to);
15 */
16
17/*
18 * this file contains a new buffer I/O scheme implementing a coherent
19 * VM object and buffer cache scheme. Pains have been taken to make
20 * sure that the performance degradation associated with schemes such
21 * as this is not realized.
22 *

--- 46 unchanged lines hidden (view full) ---

69
70struct buf *buf; /* buffer header pool */
71struct swqueue bswlist;
72
73static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
74 vm_offset_t to);
75static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
76 vm_offset_t to);
77static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
78 vm_offset_t off, vm_offset_t size,
79 vm_page_t m);
80static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
81 int pageno, vm_page_t m);
82static void vfs_clean_pages(struct buf * bp);
83static void vfs_setdirty(struct buf *bp);
84static void vfs_vmio_release(struct buf *bp);
85static void flushdirtybuffers(int slpflag, int slptimeo);
86static int flushbufqueues(void);
87

--- 129 unchanged lines hidden (view full) ---

217 needsbuffer &= ~VFS_BIO_NEED_ANY;
218 if (numfreebuffers >= hifreebuffers)
219 needsbuffer &= ~VFS_BIO_NEED_FREE;
220 wakeup(&needsbuffer);
221 }
222}
223
224/*
77static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
78 int pageno, vm_page_t m);
79static void vfs_clean_pages(struct buf * bp);
80static void vfs_setdirty(struct buf *bp);
81static void vfs_vmio_release(struct buf *bp);
82static void flushdirtybuffers(int slpflag, int slptimeo);
83static int flushbufqueues(void);
84

--- 129 unchanged lines hidden (view full) ---

214 needsbuffer &= ~VFS_BIO_NEED_ANY;
215 if (numfreebuffers >= hifreebuffers)
216 needsbuffer &= ~VFS_BIO_NEED_FREE;
217 wakeup(&needsbuffer);
218 }
219}
220
221/*
222 * vfs_buf_test_cache:
223 *
224 * Called when a buffer is extended. This function clears the B_CACHE
225 * bit if the newly extended portion of the buffer does not contain
226 * valid data.
227 */
228static __inline__
229void
230vfs_buf_test_cache(struct buf *bp,
231 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
232 vm_page_t m)
233{
234 if (bp->b_flags & B_CACHE) {
235 int base = (foff + off) & PAGE_MASK;
236 if (vm_page_is_valid(m, base, size) == 0)
237 bp->b_flags &= ~B_CACHE;
238 }
239}
240
241
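As a rough illustration of how the buffer-extension path is expected to drive this helper, here is a minimal sketch (the function name is hypothetical; the loop mirrors the page-walk that allocbuf() performs in its Step 2 further down in this revision):

/*
 * Illustrative only: re-test B_CACHE over the byte range [0, size)
 * after a buffer has grown, walking the newly covered bytes page
 * by page.  bp->b_bcount still holds the old byte count here.
 */
static void
example_retest_cache(struct buf *bp, int size)
{
	vm_offset_t toff = bp->b_bcount;
	vm_offset_t tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);

	while ((bp->b_flags & B_CACHE) && toff < size) {
		vm_pindex_t pi;

		if (tinc > (size - toff))
			tinc = size - toff;
		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc,
		    bp->b_pages[pi]);
		toff += tinc;
		tinc = PAGE_SIZE;
	}
}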
242/*
225 * Initialize buffer headers and related structures.
226 */
227void
228bufinit()
229{
230 struct buf *bp;
231 int i;
232

--- 133 unchanged lines hidden (view full) ---

366 break;
367 }
368 }
369 splx(s);
370}
371
372
373/*
243 * Initialize buffer headers and related structures.
244 */
245void
246bufinit()
247{
248 struct buf *bp;
249 int i;
250

--- 133 unchanged lines hidden (view full) ---

384 break;
385 }
386 }
387 splx(s);
388}
389
390
391/*
374 * Get a buffer with the specified data. Look in the cache first.
392 * Get a buffer with the specified data. Look in the cache first. We
393 * must clear B_ERROR and B_INVAL prior to initiating I/O. If B_CACHE
394 * is set, the buffer is valid and we do not have to do anything ( see
395 * getblk() ).
375 */
376int
377bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
378 struct buf ** bpp)
379{
380 struct buf *bp;
381
382 bp = getblk(vp, blkno, size, 0, 0);
383 *bpp = bp;
384
385 /* if not found in cache, do some I/O */
386 if ((bp->b_flags & B_CACHE) == 0) {
387 if (curproc != NULL)
388 curproc->p_stats->p_ru.ru_inblock++;
389 KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
390 bp->b_flags |= B_READ;
396 */
397int
398bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
399 struct buf ** bpp)
400{
401 struct buf *bp;
402
403 bp = getblk(vp, blkno, size, 0, 0);
404 *bpp = bp;
405
406 /* if not found in cache, do some I/O */
407 if ((bp->b_flags & B_CACHE) == 0) {
408 if (curproc != NULL)
409 curproc->p_stats->p_ru.ru_inblock++;
410 KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
411 bp->b_flags |= B_READ;
391 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
412 bp->b_flags &= ~(B_ERROR | B_INVAL);
392 if (bp->b_rcred == NOCRED) {
393 if (cred != NOCRED)
394 crhold(cred);
395 bp->b_rcred = cred;
396 }
397 vfs_busy_pages(bp, 0);
398 VOP_STRATEGY(vp, bp);
399 return (biowait(bp));
400 }
401 return (0);
402}
403
404/*
405 * Operates like bread, but also starts asynchronous I/O on
413 if (bp->b_rcred == NOCRED) {
414 if (cred != NOCRED)
415 crhold(cred);
416 bp->b_rcred = cred;
417 }
418 vfs_busy_pages(bp, 0);
419 VOP_STRATEGY(vp, bp);
420 return (biowait(bp));
421 }
422 return (0);
423}
424
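For orientation, a typical filesystem consumer of bread() looks roughly like the sketch below. This is a hedged illustration, not code from this file: the helper name, the destination pointer and the single-block granularity are placeholders.

/*
 * Illustrative only: read logical block lbn of vp through the
 * buffer cache and copy the data out to the caller.
 */
static int
example_read_block(struct vnode *vp, daddr_t lbn, int bsize, caddr_t dst)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);		/* discard the failed buffer */
		return (error);
	}
	bcopy(bp->b_data, dst, bsize);
	bqrelse(bp);			/* keep it cached for re-use */
	return (0);
}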
425/*
426 * Operates like bread, but also starts asynchronous I/O on
406 * read-ahead blocks.
427 * read-ahead blocks. We must clear B_ERROR and B_INVAL prior
428 * to initiating I/O. If B_CACHE is set, the buffer is valid
429 * and we do not have to do anything.
407 */
408int
409breadn(struct vnode * vp, daddr_t blkno, int size,
410 daddr_t * rablkno, int *rabsize,
411 int cnt, struct ucred * cred, struct buf ** bpp)
412{
413 struct buf *bp, *rabp;
414 int i;
415 int rv = 0, readwait = 0;
416
417 *bpp = bp = getblk(vp, blkno, size, 0, 0);
418
419 /* if not found in cache, do some I/O */
420 if ((bp->b_flags & B_CACHE) == 0) {
421 if (curproc != NULL)
422 curproc->p_stats->p_ru.ru_inblock++;
423 bp->b_flags |= B_READ;
430 */
431int
432breadn(struct vnode * vp, daddr_t blkno, int size,
433 daddr_t * rablkno, int *rabsize,
434 int cnt, struct ucred * cred, struct buf ** bpp)
435{
436 struct buf *bp, *rabp;
437 int i;
438 int rv = 0, readwait = 0;
439
440 *bpp = bp = getblk(vp, blkno, size, 0, 0);
441
442 /* if not found in cache, do some I/O */
443 if ((bp->b_flags & B_CACHE) == 0) {
444 if (curproc != NULL)
445 curproc->p_stats->p_ru.ru_inblock++;
446 bp->b_flags |= B_READ;
424 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
447 bp->b_flags &= ~(B_ERROR | B_INVAL);
425 if (bp->b_rcred == NOCRED) {
426 if (cred != NOCRED)
427 crhold(cred);
428 bp->b_rcred = cred;
429 }
430 vfs_busy_pages(bp, 0);
431 VOP_STRATEGY(vp, bp);
432 ++readwait;
433 }
434
435 for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
436 if (inmem(vp, *rablkno))
437 continue;
438 rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
439
440 if ((rabp->b_flags & B_CACHE) == 0) {
441 if (curproc != NULL)
442 curproc->p_stats->p_ru.ru_inblock++;
443 rabp->b_flags |= B_READ | B_ASYNC;
448 if (bp->b_rcred == NOCRED) {
449 if (cred != NOCRED)
450 crhold(cred);
451 bp->b_rcred = cred;
452 }
453 vfs_busy_pages(bp, 0);
454 VOP_STRATEGY(vp, bp);
455 ++readwait;
456 }
457
458 for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
459 if (inmem(vp, *rablkno))
460 continue;
461 rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
462
463 if ((rabp->b_flags & B_CACHE) == 0) {
464 if (curproc != NULL)
465 curproc->p_stats->p_ru.ru_inblock++;
466 rabp->b_flags |= B_READ | B_ASYNC;
444 rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
467 rabp->b_flags &= ~(B_ERROR | B_INVAL);
445 if (rabp->b_rcred == NOCRED) {
446 if (cred != NOCRED)
447 crhold(cred);
448 rabp->b_rcred = cred;
449 }
450 vfs_busy_pages(rabp, 0);
451 VOP_STRATEGY(vp, rabp);
452 } else {

--- 4 unchanged lines hidden (view full) ---

457 if (readwait) {
458 rv = biowait(bp);
459 }
460 return (rv);
461}
462
463/*
464 * Write, release buffer on completion. (Done by iodone
468 if (rabp->b_rcred == NOCRED) {
469 if (cred != NOCRED)
470 crhold(cred);
471 rabp->b_rcred = cred;
472 }
473 vfs_busy_pages(rabp, 0);
474 VOP_STRATEGY(vp, rabp);
475 } else {

--- 4 unchanged lines hidden (view full) ---

480 if (readwait) {
481 rv = biowait(bp);
482 }
483 return (rv);
484}
485
486/*
487 * Write, release buffer on completion. (Done by iodone
465 * if async.)
488 * if async). Do not bother writing anything if the buffer
489 * is invalid.
490 *
491 * Note that we set B_CACHE here, indicating that the buffer is
492 * fully valid and thus cacheable. This is true even of NFS
493 * now so we set it generally. This could be set either here
494 * or in biodone() since the I/O is synchronous. We put it
495 * here.
466 */
467int
468bwrite(struct buf * bp)
469{
470 int oldflags, s;
471 struct vnode *vp;
472 struct mount *mp;
473

--- 7 unchanged lines hidden (view full) ---

481#if !defined(MAX_PERF)
482 if ((bp->b_flags & B_BUSY) == 0)
483 panic("bwrite: buffer is not busy???");
484#endif
485 s = splbio();
486 bundirty(bp);
487
488 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
496 */
497int
498bwrite(struct buf * bp)
499{
500 int oldflags, s;
501 struct vnode *vp;
502 struct mount *mp;
503

--- 7 unchanged lines hidden (view full) ---

511#if !defined(MAX_PERF)
512 if ((bp->b_flags & B_BUSY) == 0)
513 panic("bwrite: buffer is not busy???");
514#endif
515 s = splbio();
516 bundirty(bp);
517
518 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
489 bp->b_flags |= B_WRITEINPROG;
519 bp->b_flags |= B_WRITEINPROG | B_CACHE;
490
491 bp->b_vp->v_numoutput++;
492 vfs_busy_pages(bp, 1);
493 if (curproc != NULL)
494 curproc->p_stats->p_ru.ru_oublock++;
495 splx(s);
496 VOP_STRATEGY(bp->b_vp, bp);
497
498 /*
499 * Collect statistics on synchronous and asynchronous writes.
500 * Writes to block devices are charged to their associated
501 * filesystem (if any).
502 */
503 if ((vp = bp->b_vp) != NULL) {
504 if (vp->v_type == VBLK)
505 mp = vp->v_specmountpoint;
506 else
507 mp = vp->v_mount;
520
521 bp->b_vp->v_numoutput++;
522 vfs_busy_pages(bp, 1);
523 if (curproc != NULL)
524 curproc->p_stats->p_ru.ru_oublock++;
525 splx(s);
526 VOP_STRATEGY(bp->b_vp, bp);
527
528 /*
529 * Collect statistics on synchronous and asynchronous writes.
530 * Writes to block devices are charged to their associated
531 * filesystem (if any).
532 */
533 if ((vp = bp->b_vp) != NULL) {
534 if (vp->v_type == VBLK)
535 mp = vp->v_specmountpoint;
536 else
537 mp = vp->v_mount;
508 if (mp != NULL)
538 if (mp != NULL) {
509 if ((oldflags & B_ASYNC) == 0)
510 mp->mnt_stat.f_syncwrites++;
511 else
512 mp->mnt_stat.f_asyncwrites++;
539 if ((oldflags & B_ASYNC) == 0)
540 mp->mnt_stat.f_syncwrites++;
541 else
542 mp->mnt_stat.f_asyncwrites++;
543 }
513 }
514
515 if ((oldflags & B_ASYNC) == 0) {
516 int rtval = biowait(bp);
517 brelse(bp);
518 return (rtval);
519 }
520
521 return (0);
522}
523
524/*
544 }
545
546 if ((oldflags & B_ASYNC) == 0) {
547 int rtval = biowait(bp);
548 brelse(bp);
549 return (rtval);
550 }
551
552 return (0);
553}
554
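As a rough usage sketch, a caller that has dirtied a busy buffer (obtained from bread()/getblk()) picks between the write flavours like this. The helper and its wait flag are hypothetical; bdwrite() is described next and bawrite() appears further below.

/*
 * Illustrative only: push a modified buffer out synchronously,
 * or just mark it for a delayed write and let the syncer handle it.
 */
static int
example_update_block(struct buf *bp, int wait)
{
	if (wait)
		return (bwrite(bp));	/* synchronous, waits via biowait() */
	bdwrite(bp);			/* delayed write, buffer is released */
	return (0);
}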
555/*
525 * Delayed write. (Buffer is marked dirty).
556 * Delayed write. (Buffer is marked dirty). Do not bother writing
557 * anything if the buffer is marked invalid.
558 *
559 * Note that since the buffer must be completely valid, we can safely
560 * set B_CACHE. In fact, we have to set B_CACHE here rather than in
561 * biodone() in order to prevent getblk from writing the buffer
562 * out synchronously.
526 */
527void
528bdwrite(struct buf * bp)
529{
530 struct vnode *vp;
531
532#if !defined(MAX_PERF)
533 if ((bp->b_flags & B_BUSY) == 0) {
534 panic("bdwrite: buffer is not busy");
535 }
536#endif
537
538 if (bp->b_flags & B_INVAL) {
539 brelse(bp);
540 return;
541 }
542 bdirty(bp);
543
544 /*
563 */
564void
565bdwrite(struct buf * bp)
566{
567 struct vnode *vp;
568
569#if !defined(MAX_PERF)
570 if ((bp->b_flags & B_BUSY) == 0) {
571 panic("bdwrite: buffer is not busy");
572 }
573#endif
574
575 if (bp->b_flags & B_INVAL) {
576 brelse(bp);
577 return;
578 }
579 bdirty(bp);
580
581 /*
582 * Set B_CACHE, indicating that the buffer is fully valid. This is
583 * true even of NFS now.
584 */
585 bp->b_flags |= B_CACHE;
586
587 /*
545 * This bmap keeps the system from needing to do the bmap later,
546 * perhaps when the system is attempting to do a sync. Since it
547 * is likely that the indirect block -- or whatever other datastructure
548 * that the filesystem needs is still in memory now, it is a good
549 * thing to do this. Note also, that if the pageout daemon is
550 * requesting a sync -- there might not be enough memory to do
551 * the bmap then... So, this is important to do.
552 */

--- 34 unchanged lines hidden (view full) ---

587
588/*
589 * bdirty:
590 *
591 * Turn buffer into delayed write request. We must clear B_READ and
592 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to
593 * itself to properly update it in the dirty/clean lists. We mark it
594 * B_DONE to ensure that any asynchronization of the buffer properly
588 * This bmap keeps the system from needing to do the bmap later,
589 * perhaps when the system is attempting to do a sync. Since it
590 * is likely that the indirect block -- or whatever other datastructure
591 * that the filesystem needs is still in memory now, it is a good
592 * thing to do this. Note also, that if the pageout daemon is
593 * requesting a sync -- there might not be enough memory to do
594 * the bmap then... So, this is important to do.
595 */

--- 34 unchanged lines hidden (view full) ---

630
631/*
632 * bdirty:
633 *
634 * Turn buffer into delayed write request. We must clear B_READ and
635 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to
636 * itself to properly update it in the dirty/clean lists. We mark it
637 * B_DONE to ensure that any asynchronization of the buffer properly
595 * clears B_DONE ( else a panic will occur later ). Note that B_INVALID
596 * buffers are not considered dirty even if B_DELWRI is set.
638 * clears B_DONE ( else a panic will occur later ).
597 *
639 *
640 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
641 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty()
642 * should only be called if the buffer is known-good.
643 *
598 * Since the buffer is not on a queue, we do not update the numfreebuffers
599 * count.
600 *
601 * Must be called at splbio().
602 * The buffer must be on QUEUE_NONE.
603 */
604void
605bdirty(bp)

--- 34 unchanged lines hidden (view full) ---

640 }
641}
642
643/*
644 * bawrite:
645 *
646 * Asynchronous write. Start output on a buffer, but do not wait for
647 * it to complete. The buffer is released when the output completes.
644 * Since the buffer is not on a queue, we do not update the numfreebuffers
645 * count.
646 *
647 * Must be called at splbio().
648 * The buffer must be on QUEUE_NONE.
649 */
650void
651bdirty(bp)

--- 34 unchanged lines hidden (view full) ---

686 }
687}
688
689/*
690 * bawrite:
691 *
692 * Asynchronous write. Start output on a buffer, but do not wait for
693 * it to complete. The buffer is released when the output completes.
694 *
695 * bwrite() ( or the VOP routine anyway ) is responsible for handling
696 * B_INVAL buffers. Not us.
648 */
649void
650bawrite(struct buf * bp)
651{
652 bp->b_flags |= B_ASYNC;
653 (void) VOP_BWRITE(bp);
654}
655
656/*
657 * bowrite:
658 *
659 * Ordered write. Start output on a buffer, and flag it so that the
660 * device will write it in the order it was queued. The buffer is
697 */
698void
699bawrite(struct buf * bp)
700{
701 bp->b_flags |= B_ASYNC;
702 (void) VOP_BWRITE(bp);
703}
704
705/*
706 * bowrite:
707 *
708 * Ordered write. Start output on a buffer, and flag it so that the
709 * device will write it in the order it was queued. The buffer is
661 * released when the output completes.
710 * released when the output completes. bwrite() ( or the VOP routine
711 * anyway ) is responsible for handling B_INVAL buffers.
662 */
663int
664bowrite(struct buf * bp)
665{
666 bp->b_flags |= B_ORDERED | B_ASYNC;
667 return (VOP_BWRITE(bp));
668}
669

--- 19 unchanged lines hidden (view full) ---

689#endif
690
691 s = splbio();
692
693 if (bp->b_flags & B_LOCKED)
694 bp->b_flags &= ~B_ERROR;
695
696 if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
712 */
713int
714bowrite(struct buf * bp)
715{
716 bp->b_flags |= B_ORDERED | B_ASYNC;
717 return (VOP_BWRITE(bp));
718}
719

--- 19 unchanged lines hidden (view full) ---

739#endif
740
741 s = splbio();
742
743 if (bp->b_flags & B_LOCKED)
744 bp->b_flags &= ~B_ERROR;
745
746 if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
747 /*
748 * Failed write, redirty. Must clear B_ERROR to prevent
749 * pages from being scrapped. Note: B_INVAL is ignored
750 * here but will presumably be dealt with later.
751 */
697 bp->b_flags &= ~B_ERROR;
698 bdirty(bp);
699 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
700 (bp->b_bufsize <= 0)) {
752 bp->b_flags &= ~B_ERROR;
753 bdirty(bp);
754 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
755 (bp->b_bufsize <= 0)) {
756 /*
757 * Either a failed I/O or we were asked to free or not
758 * cache the buffer.
759 */
701 bp->b_flags |= B_INVAL;
702 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
703 (*bioops.io_deallocate)(bp);
704 if (bp->b_flags & B_DELWRI)
705 --numdirtybuffers;
706 bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
707 if ((bp->b_flags & B_VMIO) == 0) {
708 if (bp->b_bufsize)

--- 13 unchanged lines hidden (view full) ---

722 * if B_DELWRI is set.
723 */
724
725 if (bp->b_flags & B_DELWRI)
726 bp->b_flags &= ~B_RELBUF;
727
728 /*
729 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer
760 bp->b_flags |= B_INVAL;
761 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
762 (*bioops.io_deallocate)(bp);
763 if (bp->b_flags & B_DELWRI)
764 --numdirtybuffers;
765 bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
766 if ((bp->b_flags & B_VMIO) == 0) {
767 if (bp->b_bufsize)

--- 13 unchanged lines hidden (view full) ---

781 * if B_DELWRI is set.
782 */
783
784 if (bp->b_flags & B_DELWRI)
785 bp->b_flags &= ~B_RELBUF;
786
787 /*
788 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer
730 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
731 * but the VM object is kept around. The B_NOCACHE flag is used to
732 * invalidate the pages in the VM object.
789 * constituted, not even NFS buffers now. Two flags affect this. If
790 * B_INVAL, the struct buf is invalidated but the VM object is kept
791 * around ( i.e. so it is trivial to reconstitute the buffer later ).
733 *
792 *
734 * The b_{validoff,validend,dirtyoff,dirtyend} values are relative
735 * to b_offset and currently have byte granularity, whereas the
736 * valid flags in the vm_pages have only DEV_BSIZE resolution.
737 * The byte resolution fields are used to avoid unnecessary re-reads
738 * of the buffer but the code really needs to be genericized so
739 * other filesystem modules can take advantage of these fields.
793 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
794 * invalidated. B_ERROR cannot be set for a failed write unless the
795 * buffer is also B_INVAL because it hits the re-dirtying code above.
740 *
796 *
741 * XXX this seems to cause performance problems.
797 * Normally we can do this whether a buffer is B_DELWRI or not. If
798 * the buffer is an NFS buffer, it is tracking piecemeal writes or
799 * the commit state and we cannot afford to lose the buffer.
742 */
743 if ((bp->b_flags & B_VMIO)
744 && !(bp->b_vp->v_tag == VT_NFS &&
745 bp->b_vp->v_type != VBLK &&
800 */
801 if ((bp->b_flags & B_VMIO)
802 && !(bp->b_vp->v_tag == VT_NFS &&
803 bp->b_vp->v_type != VBLK &&
746 (bp->b_flags & B_DELWRI) != 0)
747#ifdef notdef
748 && (bp->b_vp->v_tag != VT_NFS
749 || bp->b_vp->v_type == VBLK
750 || (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
751 || bp->b_validend == 0
752 || (bp->b_validoff == 0
753 && bp->b_validend == bp->b_bufsize))
754#endif
804 (bp->b_flags & B_DELWRI))
755 ) {
756
757 int i, j, resid;
758 vm_page_t m;
759 off_t foff;
760 vm_pindex_t poff;
761 vm_object_t obj;
762 struct vnode *vp;

--- 144 unchanged lines hidden (view full) ---

907 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
908 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
909 splx(s);
910}
911
912/*
913 * Release a buffer back to the appropriate queue but do not try to free
914 * it.
805 ) {
806
807 int i, j, resid;
808 vm_page_t m;
809 off_t foff;
810 vm_pindex_t poff;
811 vm_object_t obj;
812 struct vnode *vp;

--- 144 unchanged lines hidden (view full) ---

957 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
958 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
959 splx(s);
960}
961
962/*
963 * Release a buffer back to the appropriate queue but do not try to free
964 * it.
965 *
966 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
967 * biodone() to requeue an async I/O on completion. It is also used when
968 * known good buffers need to be requeued but we think we may need the data
969 * again soon.
915 */
916void
917bqrelse(struct buf * bp)
918{
919 int s;
920
921 s = splbio();
922

--- 168 unchanged lines hidden (view full) ---

1091 }
1092
1093 bremfree(bp);
1094 bp->b_flags |= B_BUSY | B_ASYNC;
1095
1096 splx(s);
1097 /*
1098 * default (old) behavior, writing out only one block
970 */
971void
972bqrelse(struct buf * bp)
973{
974 int s;
975
976 s = splbio();
977

--- 168 unchanged lines hidden (view full) ---

1146 }
1147
1148 bremfree(bp);
1149 bp->b_flags |= B_BUSY | B_ASYNC;
1150
1151 splx(s);
1152 /*
1153 * default (old) behavior, writing out only one block
1154 *
1155 * XXX returns b_bufsize instead of b_bcount for nwritten?
1099 */
1100 nwritten = bp->b_bufsize;
1101 (void) VOP_BWRITE(bp);
1102
1103 return nwritten;
1104}
1105
1106/*
1107 * getnewbuf:
1108 *
1109 * Find and initialize a new buffer header, freeing up existing buffers
1156 */
1157 nwritten = bp->b_bufsize;
1158 (void) VOP_BWRITE(bp);
1159
1160 return nwritten;
1161}
1162
1163/*
1164 * getnewbuf:
1165 *
1166 * Find and initialize a new buffer header, freeing up existing buffers
1110 * in the bufqueues as necessary.
1167 * in the bufqueues as necessary. The new buffer is returned with
1168 * flags set to B_BUSY.
1111 *
1169 *
1170 * Important: B_INVAL is not set. If the caller wishes to throw the
1171 * buffer away, the caller must set B_INVAL prior to calling brelse().
1172 *
1112 * We block if:
1113 * We have insufficient buffer headers
1114 * We have insufficient buffer space
1115 * buffer_map is too fragmented ( space reservation fails )
1116 *
1117 * We do *not* attempt to flush dirty buffers more than one level deep.
1118 * I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
1119 *

--- 243 unchanged lines hidden (view full) ---

1363 bp->b_blkno = bp->b_lblkno = 0;
1364 bp->b_offset = NOOFFSET;
1365 bp->b_iodone = 0;
1366 bp->b_error = 0;
1367 bp->b_resid = 0;
1368 bp->b_bcount = 0;
1369 bp->b_npages = 0;
1370 bp->b_dirtyoff = bp->b_dirtyend = 0;
1173 * We block if:
1174 * We have insufficient buffer headers
1175 * We have insufficient buffer space
1176 * buffer_map is too fragmented ( space reservation fails )
1177 *
1178 * We do *not* attempt to flush dirty buffers more than one level deep.
1179 * I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
1180 *

--- 243 unchanged lines hidden (view full) ---

1424 bp->b_blkno = bp->b_lblkno = 0;
1425 bp->b_offset = NOOFFSET;
1426 bp->b_iodone = 0;
1427 bp->b_error = 0;
1428 bp->b_resid = 0;
1429 bp->b_bcount = 0;
1430 bp->b_npages = 0;
1431 bp->b_dirtyoff = bp->b_dirtyend = 0;
1371 bp->b_validoff = bp->b_validend = 0;
1372 bp->b_usecount = 5;
1373
1374 LIST_INIT(&bp->b_dep);
1375
1376 /*
1377 * Ok, now that we have a free buffer, if we are defragging
1378 * we have to recover the kvaspace.
1379 */

--- 80 unchanged lines hidden (view full) ---

1460 addr, addr + maxsize,
1461 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1462
1463 bp->b_kvabase = (caddr_t) addr;
1464 bp->b_kvasize = maxsize;
1465 }
1466 bp->b_data = bp->b_kvabase;
1467 }
1432 bp->b_usecount = 5;
1433
1434 LIST_INIT(&bp->b_dep);
1435
1436 /*
1437 * Ok, now that we have a free buffer, if we are defragging
1438 * we have to recover the kvaspace.
1439 */

--- 80 unchanged lines hidden (view full) ---

1520 addr, addr + maxsize,
1521 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1522
1523 bp->b_kvabase = (caddr_t) addr;
1524 bp->b_kvasize = maxsize;
1525 }
1526 bp->b_data = bp->b_kvabase;
1527 }
1468
1528
1529 /*
1530 * The returned bp, if not NULL, is marked B_BUSY.
1531 */
1469 return (bp);
1470}
1471
1472/*
1473 * waitfreebuffers:
1474 *
1475 * Wait for sufficient free buffers. This routine is not called if
1476 * curproc is the update process so we do not have to do anything

--- 64 unchanged lines hidden (view full) ---

1541 if (qindex == QUEUE_LRU)
1542 break;
1543 qindex = QUEUE_LRU;
1544 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
1545 break;
1546 }
1547
1548 /*
1532 return (bp);
1533}
1534
1535/*
1536 * waitfreebuffers:
1537 *
1538 * Wait for sufficient free buffers. This routine is not called if
1539 * curproc is the update process so we do not have to do anything

--- 64 unchanged lines hidden (view full) ---

1604 if (qindex == QUEUE_LRU)
1605 break;
1606 qindex = QUEUE_LRU;
1607 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
1608 break;
1609 }
1610
1611 /*
1549 * XXX NFS does weird things with B_INVAL bps if we bwrite
1550 * them ( vfs_bio_awrite/bawrite/bdwrite/etc ) Why?
1551 *
1612 * Try to free up B_INVAL delayed-write buffers rather than
1613 * writing them out. Note also that NFS is somewhat sensitive
1614 * to B_INVAL buffers so it is doubly important that we do
1615 * this.
1552 */
1553 if ((bp->b_flags & B_DELWRI) != 0) {
1554 if (bp->b_flags & B_INVAL) {
1555 bremfree(bp);
1556 bp->b_flags |= B_BUSY;
1557 brelse(bp);
1558 } else {
1559 vfs_bio_awrite(bp);

--- 57 unchanged lines hidden (view full) ---

1617 if (vm_page_is_valid(m,
1618 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1619 return 0;
1620 }
1621 return 1;
1622}
1623
1624/*
1616 */
1617 if ((bp->b_flags & B_DELWRI) != 0) {
1618 if (bp->b_flags & B_INVAL) {
1619 bremfree(bp);
1620 bp->b_flags |= B_BUSY;
1621 brelse(bp);
1622 } else {
1623 vfs_bio_awrite(bp);

--- 57 unchanged lines hidden (view full) ---

1681 if (vm_page_is_valid(m,
1682 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1683 return 0;
1684 }
1685 return 1;
1686}
1687
1688/*
1625 * now we set the dirty range for the buffer --
1626 * for NFS -- if the file is mapped and pages have
1627 * been written to, let it know. We want the
1628 * entire range of the buffer to be marked dirty if
1629 * any of the pages have been written to for consistancy
1630 * with the b_validoff, b_validend set in the nfs write
1631 * code, and used by the nfs read code.
1689 * vfs_setdirty:
1690 *
1691 * Sets the dirty range for a buffer based on the status of the dirty
1692 * bits in the pages comprising the buffer.
1693 *
1694 * The range is limited to the size of the buffer.
1695 *
1696 * This routine is primarily used by NFS, but is generalized for the
1697 * B_VMIO case.
1632 */
1633static void
1634vfs_setdirty(struct buf *bp)
1635{
1636 int i;
1637 vm_object_t object;
1698 */
1699static void
1700vfs_setdirty(struct buf *bp)
1701{
1702 int i;
1703 vm_object_t object;
1638 vm_offset_t boffset;
1639
1640 /*
1704
1705 /*
1706 * Degenerate case - empty buffer
1707 */
1708
1709 if (bp->b_bufsize == 0)
1710 return;
1711
1712 /*
1641 * We qualify the scan for modified pages on whether the
1642 * object has been flushed yet. The OBJ_WRITEABLE flag
1643 * is not cleared simply by protecting pages off.
1644 */
1645
1646 if ((bp->b_flags & B_VMIO) == 0)
1647 return;
1648
1649 object = bp->b_pages[0]->object;
1650
1651 if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1652 printf("Warning: object %p writeable but not mightbedirty\n", object);
1653 if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1654 printf("Warning: object %p mightbedirty but not writeable\n", object);
1655
1656 if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1713 * We qualify the scan for modified pages on whether the
1714 * object has been flushed yet. The OBJ_WRITEABLE flag
1715 * is not cleared simply by protecting pages off.
1716 */
1717
1718 if ((bp->b_flags & B_VMIO) == 0)
1719 return;
1720
1721 object = bp->b_pages[0]->object;
1722
1723 if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1724 printf("Warning: object %p writeable but not mightbedirty\n", object);
1725 if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1726 printf("Warning: object %p mightbedirty but not writeable\n", object);
1727
1728 if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1729 vm_offset_t boffset;
1730 vm_offset_t eoffset;
1731
1657 /*
1658 * test the pages to see if they have been modified directly
1659 * by users through the VM system.
1660 */
1661 for (i = 0; i < bp->b_npages; i++) {
1662 vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1663 vm_page_test_dirty(bp->b_pages[i]);
1664 }
1665
1666 /*
1732 /*
1733 * test the pages to see if they have been modified directly
1734 * by users through the VM system.
1735 */
1736 for (i = 0; i < bp->b_npages; i++) {
1737 vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1738 vm_page_test_dirty(bp->b_pages[i]);
1739 }
1740
1741 /*
1667 * scan forwards for the first page modified
1742 * Calculate the encompassing dirty range, boffset and eoffset,
1743 * (eoffset - boffset) bytes.
1668 */
1744 */
1745
1669 for (i = 0; i < bp->b_npages; i++) {
1746 for (i = 0; i < bp->b_npages; i++) {
1670 if (bp->b_pages[i]->dirty) {
1747 if (bp->b_pages[i]->dirty)
1671 break;
1748 break;
1672 }
1673 }
1749 }
1674
1675 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1750 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1676 if (boffset < bp->b_dirtyoff) {
1677 bp->b_dirtyoff = max(boffset, 0);
1678 }
1679
1751
1680 /*
1681 * scan backwards for the last page modified
1682 */
1683 for (i = bp->b_npages - 1; i >= 0; --i) {
1684 if (bp->b_pages[i]->dirty) {
1685 break;
1686 }
1687 }
1752 for (i = bp->b_npages - 1; i >= 0; --i) {
1753 if (bp->b_pages[i]->dirty) {
1754 break;
1755 }
1756 }
1688 boffset = (i + 1);
1689#if 0
1690 offset = boffset + bp->b_pages[0]->pindex;
1691 if (offset >= object->size)
1692 boffset = object->size - bp->b_pages[0]->pindex;
1693#endif
1694 boffset = (boffset << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1695 if (bp->b_dirtyend < boffset)
1696 bp->b_dirtyend = min(boffset, bp->b_bufsize);
1757 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1758
1759 /*
1760 * Fit it to the buffer.
1761 */
1762
1763 if (eoffset > bp->b_bcount)
1764 eoffset = bp->b_bcount;
1765
1766 /*
1767 * If we have a good dirty range, merge with the existing
1768 * dirty range.
1769 */
1770
1771 if (boffset < eoffset) {
1772 if (bp->b_dirtyoff > boffset)
1773 bp->b_dirtyoff = boffset;
1774 if (bp->b_dirtyend < eoffset)
1775 bp->b_dirtyend = eoffset;
1776 }
1697 }
1698}
1699
1700/*
1777 }
1778}
1779
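To make the offset arithmetic above concrete, a small worked example with hypothetical values, assuming PAGE_SIZE is 4096 (PAGE_SHIFT 12):

/*
 * Worked example: an 8192-byte buffer with b_offset = 0x1200, so the
 * data starts 0x200 bytes into its first page, and only page 1 (the
 * second page) tests dirty.
 *
 *   first dirty page: i = 1
 *     boffset = (1 << 12) - (0x1200 & 0xfff) = 0x1000 - 0x200 = 0xe00
 *   last dirty page:  i = 1
 *     eoffset = (2 << 12) - 0x200            = 0x2000 - 0x200 = 0x1e00
 *
 * eoffset is clipped to b_bcount if it overshoots, and the range
 * [0xe00, 0x1e00) is then merged into b_dirtyoff/b_dirtyend.
 */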
1780/*
1701 * Get a block given a specified block and offset into a file/device.
1781 * getblk:
1782 *
1783 * Get a block given a specified block and offset into a file/device.
1784 * The buffer's B_DONE bit will be cleared on return, making it almost
1785 * ready for an I/O initiation. B_INVAL may or may not be set on
1786 * return. The caller should clear B_INVAL prior to initiating a
1787 * READ.
1788 *
1789 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
1790 * an existing buffer.
1791 *
1792 * For a VMIO buffer, B_CACHE is modified according to the backing VM.
1793 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
1794 * and then cleared based on the backing VM. If the previous buffer is
1795 * non-0-sized but invalid, B_CACHE will be cleared.
1796 *
1797 * If getblk() must create a new buffer, the new buffer is returned with
1798 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
1799 * case it is returned with B_INVAL clear and B_CACHE set based on the
1800 * backing VM.
1801 *
1802 * getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
1803 * B_CACHE bit is clear.
1804 *
1805 * What this means, basically, is that the caller should use B_CACHE to
1806 * determine whether the buffer is fully valid or not and should clear
1807 * B_INVAL prior to issuing a read. If the caller intends to validate
1808 * the buffer by loading its data area with something, the caller needs
1809 * to clear B_INVAL. If the caller does this without issuing an I/O,
1810 * the caller should set B_CACHE ( as an optimization ), else the caller
1811 * should issue the I/O and biodone() will set B_CACHE if the I/O was
1812 * a write attempt or if it was a successful read. If the caller
1813 * intends to issue a READ, the caller must clear B_INVAL and B_ERROR
1814 * prior to issuing the READ. biodone() will *not* clear B_INVAL.
1702 */
1703struct buf *
1704getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1705{
1706 struct buf *bp;
1815 */
1816struct buf *
1817getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1818{
1819 struct buf *bp;
1707 int i, s;
1820 int s;
1708 struct bufhashhdr *bh;
1709
1710#if !defined(MAX_PERF)
1711 if (size > MAXBSIZE)
1712 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1713#endif
1714
1715 s = splbio();

--- 6 unchanged lines hidden (view full) ---

1722 needsbuffer |= VFS_BIO_NEED_ANY;
1723 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1724 slptimeo);
1725 } else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
1726 waitfreebuffers(slpflag, slptimeo);
1727 }
1728
1729 if ((bp = gbincore(vp, blkno))) {
1821 struct bufhashhdr *bh;
1822
1823#if !defined(MAX_PERF)
1824 if (size > MAXBSIZE)
1825 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1826#endif
1827
1828 s = splbio();

--- 6 unchanged lines hidden (view full) ---

1835 needsbuffer |= VFS_BIO_NEED_ANY;
1836 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1837 slptimeo);
1838 } else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
1839 waitfreebuffers(slpflag, slptimeo);
1840 }
1841
1842 if ((bp = gbincore(vp, blkno))) {
1843 /*
1844 * Buffer is in-core
1845 */
1846
1730 if (bp->b_flags & B_BUSY) {
1731 bp->b_flags |= B_WANTED;
1732 if (bp->b_usecount < BUF_MAXUSE)
1733 ++bp->b_usecount;
1734
1735 if (!tsleep(bp,
1736 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1737 goto loop;
1738 }
1739
1740 splx(s);
1741 return (struct buf *) NULL;
1742 }
1847 if (bp->b_flags & B_BUSY) {
1848 bp->b_flags |= B_WANTED;
1849 if (bp->b_usecount < BUF_MAXUSE)
1850 ++bp->b_usecount;
1851
1852 if (!tsleep(bp,
1853 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1854 goto loop;
1855 }
1856
1857 splx(s);
1858 return (struct buf *) NULL;
1859 }
1743 bp->b_flags |= B_BUSY | B_CACHE;
1860
1861 /*
1862 * Busy the buffer. B_CACHE is cleared if the buffer is
1863 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set
1864 * and for a VMIO buffer B_CACHE is adjusted according to the
1865 * backing VM cache.
1866 */
1867 bp->b_flags |= B_BUSY;
1868 if (bp->b_flags & B_INVAL)
1869 bp->b_flags &= ~B_CACHE;
1870 else if ((bp->b_flags & (B_VMIO|B_INVAL)) == 0)
1871 bp->b_flags |= B_CACHE;
1744 bremfree(bp);
1745
1746 /*
1747 * check for size inconsistencies for the non-VMIO case.
1748 */
1749
1750 if (bp->b_bcount != size) {
1751 if ((bp->b_flags & B_VMIO) == 0 ||

--- 13 unchanged lines hidden (view full) ---

1765 }
1766 }
1767 goto loop;
1768 }
1769 }
1770
1771 /*
1772 * If the size is inconsistent in the VMIO case, we can resize
1872 bremfree(bp);
1873
1874 /*
1875 * check for size inconsistencies for the non-VMIO case.
1876 */
1877
1878 if (bp->b_bcount != size) {
1879 if ((bp->b_flags & B_VMIO) == 0 ||

--- 13 unchanged lines hidden (view full) ---

1893 }
1894 }
1895 goto loop;
1896 }
1897 }
1898
1899 /*
1900 * If the size is inconsistent in the VMIO case, we can resize
1773 * the buffer. This might lead to B_CACHE getting cleared.
1901 * the buffer. This might lead to B_CACHE getting set or
1902 * cleared. If the size has not changed, B_CACHE remains
1903 * unchanged from its previous state.
1774 */
1775
1776 if (bp->b_bcount != size)
1777 allocbuf(bp, size);
1778
1779 KASSERT(bp->b_offset != NOOFFSET,
1780 ("getblk: no buffer offset"));
1781
1782 /*
1904 */
1905
1906 if (bp->b_bcount != size)
1907 allocbuf(bp, size);
1908
1909 KASSERT(bp->b_offset != NOOFFSET,
1910 ("getblk: no buffer offset"));
1911
1912 /*
1783 * Check that the constituted buffer really deserves for the
1784 * B_CACHE bit to be set. B_VMIO type buffers might not
1785 * contain fully valid pages. Normal (old-style) buffers
1786 * should be fully valid. This might also lead to B_CACHE
1787 * getting clear.
1913 * A buffer with B_DELWRI set and B_CACHE clear must
1914 * be committed before we can return the buffer in
1915 * order to prevent the caller from issuing a read
1916 * ( due to B_CACHE not being set ) and overwriting
1917 * it.
1788 *
1918 *
1789 * If B_CACHE is already clear, don't bother checking to see
1790 * if we have to clear it again.
1791 *
1792 * XXX this code should not be necessary unless the B_CACHE
1793 * handling is broken elsewhere in the kernel. We need to
1794 * check the cases and then turn the clearing part of this
1795 * code into a panic.
1919 * Most callers, including NFS and FFS, need this to
1920 * operate properly either because they assume they
1921 * can issue a read if B_CACHE is not set, or because
1922 * ( for example ) an uncached B_DELWRI might loop due
1923 * to softupdates re-dirtying the buffer. In the latter
1924 * case, B_CACHE is set after the first write completes,
1925 * preventing further loops.
1796 */
1926 */
1797 if (
1798 (bp->b_flags & (B_VMIO|B_CACHE)) == (B_VMIO|B_CACHE) &&
1799 (bp->b_vp->v_tag != VT_NFS || bp->b_validend <= 0)
1800 ) {
1801 int checksize = bp->b_bufsize;
1802 int poffset = bp->b_offset & PAGE_MASK;
1803 int resid;
1804 for (i = 0; i < bp->b_npages; i++) {
1805 resid = (checksize > (PAGE_SIZE - poffset)) ?
1806 (PAGE_SIZE - poffset) : checksize;
1807 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) {
1808 bp->b_flags &= ~(B_CACHE | B_DONE);
1809 break;
1810 }
1811 checksize -= resid;
1812 poffset = 0;
1813 }
1814 }
1815
1927
1816 /*
1817 * If B_DELWRI is set and B_CACHE got cleared ( or was
1818 * already clear ), we have to commit the write and
1819 * retry. The NFS code absolutely depends on this,
1820 * and so might the FFS code. In any case, it formalizes
1821 * the B_CACHE rules. See sys/buf.h.
1822 */
1823
1824 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
1825 VOP_BWRITE(bp);
1826 goto loop;
1827 }
1828
1829 if (bp->b_usecount < BUF_MAXUSE)
1830 ++bp->b_usecount;
1831 splx(s);
1928 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
1929 VOP_BWRITE(bp);
1930 goto loop;
1931 }
1932
1933 if (bp->b_usecount < BUF_MAXUSE)
1934 ++bp->b_usecount;
1935 splx(s);
1832 return (bp);
1936 bp->b_flags &= ~B_DONE;
1833 } else {
1937 } else {
1938 /*
1939 * Buffer is not in-core, create new buffer. The buffer
1940 * returned by getnewbuf() is marked B_BUSY. Note that the
1941 * returned buffer is also considered valid ( not marked
1942 * B_INVAL ).
1943 */
1834 int bsize, maxsize, vmio;
1835 off_t offset;
1836
1837 if (vp->v_type == VBLK)
1838 bsize = DEV_BSIZE;
1839 else if (vp->v_mountedhere)
1840 bsize = vp->v_mountedhere->mnt_stat.f_iosize;
1841 else if (vp->v_mount)
1842 bsize = vp->v_mount->mnt_stat.f_iosize;
1843 else
1844 bsize = size;
1845
1846 offset = (off_t)blkno * bsize;
1847 vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
1848 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
1849 maxsize = imax(maxsize, bsize);
1850
1851 if ((bp = getnewbuf(vp, blkno,
1944 int bsize, maxsize, vmio;
1945 off_t offset;
1946
1947 if (vp->v_type == VBLK)
1948 bsize = DEV_BSIZE;
1949 else if (vp->v_mountedhere)
1950 bsize = vp->v_mountedhere->mnt_stat.f_iosize;
1951 else if (vp->v_mount)
1952 bsize = vp->v_mount->mnt_stat.f_iosize;
1953 else
1954 bsize = size;
1955
1956 offset = (off_t)blkno * bsize;
1957 vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
1958 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
1959 maxsize = imax(maxsize, bsize);
1960
1961 if ((bp = getnewbuf(vp, blkno,
1852 slpflag, slptimeo, size, maxsize)) == 0) {
1962 slpflag, slptimeo, size, maxsize)) == NULL) {
1853 if (slpflag || slptimeo) {
1854 splx(s);
1855 return NULL;
1856 }
1857 goto loop;
1858 }
1859
1860 /*
1861 * This code is used to make sure that a buffer is not
1862 * created while the getnewbuf routine is blocked.
1863 * This can be a problem whether the vnode is locked or not.
1963 if (slpflag || slptimeo) {
1964 splx(s);
1965 return NULL;
1966 }
1967 goto loop;
1968 }
1969
1970 /*
1971 * This code is used to make sure that a buffer is not
1972 * created while the getnewbuf routine is blocked.
1973 * This can be a problem whether the vnode is locked or not.
1974 * If the buffer is created out from under us, we have to
1975 * throw away the one we just created. There is no race
1976 * window because we are safely running at splbio() from the
1977 * point of the duplicate buffer creation through to here.
1864 */
1865 if (gbincore(vp, blkno)) {
1866 bp->b_flags |= B_INVAL;
1867 brelse(bp);
1868 goto loop;
1869 }
1870
1871 /*
1872 * Insert the buffer into the hash, so that it can
1873 * be found by incore.
1874 */
1875 bp->b_blkno = bp->b_lblkno = blkno;
1876 bp->b_offset = offset;
1877
1878 bgetvp(vp, bp);
1879 LIST_REMOVE(bp, b_hash);
1880 bh = BUFHASH(vp, blkno);
1881 LIST_INSERT_HEAD(bh, bp, b_hash);
1882
1978 */
1979 if (gbincore(vp, blkno)) {
1980 bp->b_flags |= B_INVAL;
1981 brelse(bp);
1982 goto loop;
1983 }
1984
1985 /*
1986 * Insert the buffer into the hash, so that it can
1987 * be found by incore.
1988 */
1989 bp->b_blkno = bp->b_lblkno = blkno;
1990 bp->b_offset = offset;
1991
1992 bgetvp(vp, bp);
1993 LIST_REMOVE(bp, b_hash);
1994 bh = BUFHASH(vp, blkno);
1995 LIST_INSERT_HEAD(bh, bp, b_hash);
1996
1997 /*
1998 * set B_VMIO bit. allocbuf() the buffer bigger. Since the
1999 * buffer size starts out as 0, B_CACHE will be set by
2000 * allocbuf() for the VMIO case prior to it testing the
2001 * backing store for validity.
2002 */
2003
1883 if (vmio) {
2004 if (vmio) {
1884 bp->b_flags |= (B_VMIO | B_CACHE);
2005 bp->b_flags |= B_VMIO;
1885#if defined(VFS_BIO_DEBUG)
1886 if (vp->v_type != VREG && vp->v_type != VBLK)
1887 printf("getblk: vmioing file type %d???\n", vp->v_type);
1888#endif
1889 } else {
1890 bp->b_flags &= ~B_VMIO;
1891 }
1892
1893 allocbuf(bp, size);
1894
1895 splx(s);
2006#if defined(VFS_BIO_DEBUG)
2007 if (vp->v_type != VREG && vp->v_type != VBLK)
2008 printf("getblk: vmioing file type %d???\n", vp->v_type);
2009#endif
2010 } else {
2011 bp->b_flags &= ~B_VMIO;
2012 }
2013
2014 allocbuf(bp, size);
2015
2016 splx(s);
1896 return (bp);
2017 bp->b_flags &= ~B_DONE;
1897 }
2018 }
2019 return (bp);
1898}
1899
1900/*
2020}
2021
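A minimal sketch of the caller contract spelled out in the comment above getblk(); it is essentially what bread() earlier in this file does, and the helper name is hypothetical:

/*
 * Illustrative only: fetch a block and make it valid, honouring the
 * B_CACHE/B_INVAL rules documented above getblk().
 */
static int
example_get_valid_block(struct vnode *vp, daddr_t blkno, int size,
    struct buf **bpp)
{
	struct buf *bp;
	int error;

	bp = getblk(vp, blkno, size, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		/* not fully valid: clear B_INVAL/B_ERROR and issue a read */
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		error = biowait(bp);
		if (error) {
			brelse(bp);
			return (error);
		}
	}
	*bpp = bp;
	return (0);
}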
2022/*
1901 * Get an empty, disassociated buffer of given size.
2023 * Get an empty, disassociated buffer of given size. The buffer is initially
2024 * set to B_INVAL.
1902 */
1903struct buf *
1904geteblk(int size)
1905{
1906 struct buf *bp;
1907 int s;
1908
1909 s = splbio();
1910 while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
1911 splx(s);
1912 allocbuf(bp, size);
2025 */
2026struct buf *
2027geteblk(int size)
2028{
2029 struct buf *bp;
2030 int s;
2031
2032 s = splbio();
2033 while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
2034 splx(s);
2035 allocbuf(bp, size);
1913 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
2036 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */
1914 return (bp);
1915}
1916
1917
1918/*
1919 * This code constitutes the buffer memory from either anonymous system
1920 * memory (in the case of non-VMIO operations) or from an associated
1921 * VM object (in the case of VMIO operations). This code is able to
1922 * resize a buffer up or down.
1923 *
1924 * Note that this code is tricky, and has many complications to resolve
1925 * deadlock or inconsistent data situations. Tread lightly!!!
1926 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
1927 * the caller. Calling this code willy nilly can result in the loss of data.
2037 return (bp);
2038}
2039
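A brief usage sketch for geteblk(); the size and the use made of the data area are placeholders:

/*
 * Illustrative only: borrow an anonymous buffer as scratch space and
 * release it when done.  Because the buffer comes back B_INVAL,
 * brelse() simply throws it away rather than caching it.
 */
static void
example_scratch_buffer(void)
{
	struct buf *bp;

	bp = geteblk(8192);		/* B_BUSY, B_INVAL, no vnode */
	bzero(bp->b_data, 8192);
	/* ... use bp->b_data as temporary storage ... */
	brelse(bp);
}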
2040
2041/*
2042 * This code constitutes the buffer memory from either anonymous system
2043 * memory (in the case of non-VMIO operations) or from an associated
2044 * VM object (in the case of VMIO operations). This code is able to
2045 * resize a buffer up or down.
2046 *
2047 * Note that this code is tricky, and has many complications to resolve
2048 * deadlock or inconsistent data situations. Tread lightly!!!
2049 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2050 * the caller. Calling this code willy nilly can result in the loss of data.
2051 *
2052 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with
2053 * B_CACHE for the non-VMIO case.
1928 */
1929
1930int
1931allocbuf(struct buf *bp, int size)
1932{
1933 int newbsize, mbsize;
1934 int i;
1935

--- 4 unchanged lines hidden (view full) ---

1940 if (bp->b_kvasize < size)
1941 panic("allocbuf: buffer too small");
1942#endif
1943
1944 if ((bp->b_flags & B_VMIO) == 0) {
1945 caddr_t origbuf;
1946 int origbufsize;
1947 /*
2054 */
2055
2056int
2057allocbuf(struct buf *bp, int size)
2058{
2059 int newbsize, mbsize;
2060 int i;
2061

--- 4 unchanged lines hidden (view full) ---

2066 if (bp->b_kvasize < size)
2067 panic("allocbuf: buffer too small");
2068#endif
2069
2070 if ((bp->b_flags & B_VMIO) == 0) {
2071 caddr_t origbuf;
2072 int origbufsize;
2073 /*
1948 * Just get anonymous memory from the kernel
2074 * Just get anonymous memory from the kernel. Don't
2075 * mess with B_CACHE.
1949 */
1950 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1951#if !defined(NO_B_MALLOC)
1952 if (bp->b_flags & B_MALLOC)
1953 newbsize = mbsize;
1954 else
1955#endif
1956 newbsize = round_page(size);

--- 84 unchanged lines hidden (view full) ---

2041 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2042 desiredpages = (size == 0) ? 0 :
2043 num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2044
2045#if !defined(NO_B_MALLOC)
2046 if (bp->b_flags & B_MALLOC)
2047 panic("allocbuf: VMIO buffer can't be malloced");
2048#endif
2076 */
2077 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2078#if !defined(NO_B_MALLOC)
2079 if (bp->b_flags & B_MALLOC)
2080 newbsize = mbsize;
2081 else
2082#endif
2083 newbsize = round_page(size);

--- 84 unchanged lines hidden (view full) ---

2168 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2169 desiredpages = (size == 0) ? 0 :
2170 num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2171
2172#if !defined(NO_B_MALLOC)
2173 if (bp->b_flags & B_MALLOC)
2174 panic("allocbuf: VMIO buffer can't be malloced");
2175#endif
2176 /*
2177 * Set B_CACHE initially if buffer is 0 length or will become
2178 * 0-length.
2179 */
2180 if (size == 0 || bp->b_bufsize == 0)
2181 bp->b_flags |= B_CACHE;
2049
2050 if (newbsize < bp->b_bufsize) {
2182
2183 if (newbsize < bp->b_bufsize) {
2184 /*
2185 * DEV_BSIZE aligned new buffer size is less then the
2186 * DEV_BSIZE aligned existing buffer size. Figure out
2187 * if we have to remove any pages.
2188 */
2051 if (desiredpages < bp->b_npages) {
2052 for (i = desiredpages; i < bp->b_npages; i++) {
2053 /*
2054 * the page is not freed here -- it
2189 if (desiredpages < bp->b_npages) {
2190 for (i = desiredpages; i < bp->b_npages; i++) {
2191 /*
2192 * the page is not freed here -- it
2055 * is the responsibility of vnode_pager_setsize
2193 * is the responsibility of
2194 * vnode_pager_setsize
2056 */
2057 m = bp->b_pages[i];
2058 KASSERT(m != bogus_page,
2059 ("allocbuf: bogus page found"));
2060 while (vm_page_sleep_busy(m, TRUE, "biodep"))
2061 ;
2062
2063 bp->b_pages[i] = NULL;
2064 vm_page_unwire(m, 0);
2065 }
2066 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2067 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2068 bp->b_npages = desiredpages;
2069 }
2195 */
2196 m = bp->b_pages[i];
2197 KASSERT(m != bogus_page,
2198 ("allocbuf: bogus page found"));
2199 while (vm_page_sleep_busy(m, TRUE, "biodep"))
2200 ;
2201
2202 bp->b_pages[i] = NULL;
2203 vm_page_unwire(m, 0);
2204 }
2205 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2206 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2207 bp->b_npages = desiredpages;
2208 }
2070 } else if (newbsize > bp->b_bufsize) {
2071 vm_object_t obj;
2072 vm_offset_t tinc, toff;
2073 vm_ooffset_t off;
2074 vm_pindex_t objoff;
2075 int pageindex, curbpnpages;
2209 } else if (size > bp->b_bcount) {
2210 /*
2211 * We are growing the buffer, possibly in a
2212 * byte-granular fashion.
2213 */
2076 struct vnode *vp;
2214 struct vnode *vp;
2077 int bsize;
2078 int orig_validoff = bp->b_validoff;
2079 int orig_validend = bp->b_validend;
2215 vm_object_t obj;
2216 vm_offset_t toff;
2217 vm_offset_t tinc;
2080
2218
2219 /*
2220 * Step 1, bring in the VM pages from the object,
2221 * allocating them if necessary. We must clear
2222 * B_CACHE if these pages are not valid for the
2223 * range covered by the buffer.
2224 */
2225
2081 vp = bp->b_vp;
2226 vp = bp->b_vp;
2227 obj = vp->v_object;
2082
2228
2083 if (vp->v_type == VBLK)
2084 bsize = DEV_BSIZE;
2085 else
2086 bsize = vp->v_mount->mnt_stat.f_iosize;
2229 while (bp->b_npages < desiredpages) {
2230 vm_page_t m;
2231 vm_pindex_t pi;
2087
2232
2088 if (bp->b_npages < desiredpages) {
2089 obj = vp->v_object;
2090 tinc = PAGE_SIZE;
2091
2092 off = bp->b_offset;
2093 KASSERT(bp->b_offset != NOOFFSET,
2094 ("allocbuf: no buffer offset"));
2095 curbpnpages = bp->b_npages;
2096 doretry:
2097 bp->b_validoff = orig_validoff;
2098 bp->b_validend = orig_validend;
2099 bp->b_flags |= B_CACHE;
2100 for (toff = 0; toff < newbsize; toff += tinc) {
2101 objoff = OFF_TO_IDX(off + toff);
2102 pageindex = objoff - OFF_TO_IDX(off);
2103 tinc = PAGE_SIZE - ((off + toff) & PAGE_MASK);
2104 if (pageindex < curbpnpages) {
2105
2106 m = bp->b_pages[pageindex];
2107#ifdef VFS_BIO_DIAG
2108 if (m->pindex != objoff)
2109 panic("allocbuf: page changed offset?!!!?");
2110#endif
2111 if (tinc > (newbsize - toff))
2112 tinc = newbsize - toff;
2113 if (bp->b_flags & B_CACHE)
2114 vfs_buf_set_valid(bp, off, toff, tinc, m);
2115 continue;
2116 }
2117 m = vm_page_lookup(obj, objoff);
2118 if (!m) {
2119 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
2120 if (!m) {
2121 VM_WAIT;
2122 vm_pageout_deficit += (desiredpages - curbpnpages);
2123 goto doretry;
2124 }
2125
2233 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2234 if ((m = vm_page_lookup(obj, pi)) == NULL) {
2235 m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
2236 if (m == NULL) {
2237 VM_WAIT;
2238 vm_pageout_deficit += desiredpages - bp->b_npages;
2239 } else {
2126 vm_page_wire(m);
2127 vm_page_wakeup(m);
2128 bp->b_flags &= ~B_CACHE;
2240 vm_page_wire(m);
2241 vm_page_wakeup(m);
2242 bp->b_flags &= ~B_CACHE;
2129
2130 } else if (vm_page_sleep_busy(m, FALSE, "pgtblk")) {
2131 /*
2132 * If we had to sleep, retry.
2133 *
2134 * Also note that we only test
2135 * PG_BUSY here, not m->busy.
2136 *
2137 * We cannot sleep on m->busy
2138 * here because a vm_fault ->
2139 * getpages -> cluster-read ->
2140 * ...-> allocbuf sequence
2141 * will convert PG_BUSY to
2142 * m->busy so we have to let
2143 * m->busy through if we do
2144 * not want to deadlock.
2145 */
2146 goto doretry;
2147 } else {
2148 if ((curproc != pageproc) &&
2149 ((m->queue - m->pc) == PQ_CACHE) &&
2150 ((cnt.v_free_count + cnt.v_cache_count) <
2151 (cnt.v_free_min + cnt.v_cache_min))) {
2152 pagedaemon_wakeup();
2153 }
2154 if (tinc > (newbsize - toff))
2155 tinc = newbsize - toff;
2156 if (bp->b_flags & B_CACHE)
2157 vfs_buf_set_valid(bp, off, toff, tinc, m);
2158 vm_page_flag_clear(m, PG_ZERO);
2159 vm_page_wire(m);
2243 bp->b_pages[bp->b_npages] = m;
2244 ++bp->b_npages;
2160 }
2245 }
2161 bp->b_pages[pageindex] = m;
2162 curbpnpages = pageindex + 1;
2246 continue;
2163 }
2247 }
2164 if (vp->v_tag == VT_NFS &&
2165 vp->v_type != VBLK) {
2166 if (bp->b_dirtyend > 0) {
2167 bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
2168 bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
2169 }
2170 if (bp->b_validend == 0)
2171 bp->b_flags &= ~B_CACHE;
2248
2249 /*
2250 * We found a page. If we have to sleep on it,
2251 * retry because it might have gotten freed out
2252 * from under us.
2253 *
2254 * We can only test PG_BUSY here. Blocking on
2255 * m->busy might lead to a deadlock:
2256 *
2257 * vm_fault->getpages->cluster_read->allocbuf
2258 *
2259 */
2260
2261 if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2262 continue;
2263
2264 /*
2265 * We have a good page. Should we wakeup the
2266 * page daemon?
2267 */
2268 if ((curproc != pageproc) &&
2269 ((m->queue - m->pc) == PQ_CACHE) &&
2270 ((cnt.v_free_count + cnt.v_cache_count) <
2271 (cnt.v_free_min + cnt.v_cache_min))
2272 ) {
2273 pagedaemon_wakeup();
2172 }
2274 }
2173 bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data);
2174 bp->b_npages = curbpnpages;
2175 pmap_qenter((vm_offset_t) bp->b_data,
2176 bp->b_pages, bp->b_npages);
2177 ((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
2275 vm_page_flag_clear(m, PG_ZERO);
2276 vm_page_wire(m);
2277 bp->b_pages[bp->b_npages] = m;
2278 ++bp->b_npages;
2178 }
2279 }
2280
2281 /*
2282 * Step 2. We've loaded the pages into the buffer,
2283 * we have to figure out if we can still have B_CACHE
2284 * set. Note that B_CACHE is set according to the
2286 * byte-granular range ( bcount and size ), not the
2286 * aligned range ( newbsize ).
2287 *
2288 * The VM test is against m->valid, which is DEV_BSIZE
2289 * aligned. Needless to say, the validity of the data
2290 * needs to also be DEV_BSIZE aligned. Note that this
2291 * fails with NFS if the server or some other client
2292 * extends the file's EOF. If our buffer is resized,
2293 * B_CACHE may remain set! XXX
2294 */
2295
2296 toff = bp->b_bcount;
2297 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2298
2299 while ((bp->b_flags & B_CACHE) && toff < size) {
2300 vm_pindex_t pi;
2301
2302 if (tinc > (size - toff))
2303 tinc = size - toff;
2304
2305 pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2306 PAGE_SHIFT;
2307
2308 vfs_buf_test_cache(
2309 bp,
2310 bp->b_offset,
2311 toff,
2312 tinc,
2313 bp->b_pages[pi]
2314 );
2315 toff += tinc;
2316 tinc = PAGE_SIZE;
2317 }
2318
2319 /*
2320 * Step 3, fixup the KVM pmap. Remember that
2321 * bp->b_data is relative to bp->b_offset, but
2322 * bp->b_offset may be offset into the first page.
2323 */
2324
2325 bp->b_data = (caddr_t)
2326 trunc_page((vm_offset_t)bp->b_data);
2327 pmap_qenter(
2328 (vm_offset_t)bp->b_data,
2329 bp->b_pages,
2330 bp->b_npages
2331 );
2332 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2333 (vm_offset_t)(bp->b_offset & PAGE_MASK));
2179 }
2180 }
2181 if (bp->b_flags & B_VMIO)
2182 vmiospace += (newbsize - bp->b_bufsize);
2183 bufspace += (newbsize - bp->b_bufsize);
2184 runningbufspace += (newbsize - bp->b_bufsize);
2185 if (newbsize < bp->b_bufsize)
2186 bufspacewakeup();
2334 }
2335 }
2336 if (bp->b_flags & B_VMIO)
2337 vmiospace += (newbsize - bp->b_bufsize);
2338 bufspace += (newbsize - bp->b_bufsize);
2339 runningbufspace += (newbsize - bp->b_bufsize);
2340 if (newbsize < bp->b_bufsize)
2341 bufspacewakeup();
2187 bp->b_bufsize = newbsize;
2188 bp->b_bcount = size;
2342 bp->b_bufsize = newbsize; /* actual buffer allocation */
2343 bp->b_bcount = size; /* requested buffer size */
2189 return 1;
2190}
2191
2192/*
2344 return 1;
2345}
2346
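/*
 * Illustrative sketch (hypothetical numbers) of the Step 3 fixup in
 * allocbuf() above: pmap_qenter() maps the pages at a page-aligned KVA,
 * say 0xc1000000.  With b_offset = 0x12200 the sub-page offset is
 * 0x12200 & PAGE_MASK = 0x200, so b_data becomes 0xc1000000 | 0x200 =
 * 0xc1000200 -- the mapped address of byte b_offset, not of the start of
 * the first page.
 */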
2347/*
2193 * Wait for buffer I/O completion, returning error status.
2348 * biowait:
2349 *
2350 * Wait for buffer I/O completion, returning error status. The buffer
2351 * is left B_BUSY|B_DONE on return. B_EINTR is converted into an EINTR
2352 * error and cleared.
2194 */
2195int
2196biowait(register struct buf * bp)
2197{
2198 int s;
2199
2200 s = splbio();
2201 while ((bp->b_flags & B_DONE) == 0)

--- 13 unchanged lines hidden (view full) ---

2215 if (bp->b_flags & B_ERROR) {
2216 return (bp->b_error ? bp->b_error : EIO);
2217 } else {
2218 return (0);
2219 }
2220}
2221
2222/*
2353 */
2354int
2355biowait(register struct buf * bp)
2356{
2357 int s;
2358
2359 s = splbio();
2360 while ((bp->b_flags & B_DONE) == 0)

--- 13 unchanged lines hidden (view full) ---

2374 if (bp->b_flags & B_ERROR) {
2375 return (bp->b_error ? bp->b_error : EIO);
2376 } else {
2377 return (0);
2378 }
2379}
2380
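/*
 * Illustrative sketch of the usual biowait() caller pattern: start the
 * I/O, then sleep until biodone() posts B_DONE.  This is a condensed
 * paraphrase of bread() earlier in this file; the function name is
 * hypothetical and the error handling is simplified.
 */
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
	}
	*bpp = bp;
	return (biowait(bp));		/* 0 on success, else EIO or EINTR */
}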
2381/*
2223 * Finish I/O on a buffer, calling an optional function.
2224 * This is usually called from interrupt level, so process blocking
2225 * is not *a good idea*.
2382 * biodone:
2383 *
2384 * Finish I/O on a buffer, optionally calling a completion function.
2385 * This is usually called from an interrupt so process blocking is
2386 * not allowed.
2387 *
2388 * biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2389 * In a non-VMIO bp, B_CACHE will be set on the next getblk()
2390 * assuming B_INVAL is clear.
2391 *
2392 * For the VMIO case, we set B_CACHE if the op was a read and no
2393 * read error occurred, or if the op was a write. B_CACHE is never
2394 * set if the buffer is invalid or otherwise uncacheable.
2395 *
2396 * biodone does not mess with B_INVAL, allowing the I/O routine or the
2397 * initiator to leave B_INVAL set to brelse the buffer out of existence
2398 * in the biodone routine.
2226 */
2227void
2228biodone(register struct buf * bp)
2229{
2230 int s;
2231
2232 s = splbio();
2233

--- 56 unchanged lines hidden (view full) ---

2290 }
2291#endif
2292#if defined(VFS_BIO_DEBUG)
2293 if (obj->paging_in_progress < bp->b_npages) {
2294 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2295 obj->paging_in_progress, bp->b_npages);
2296 }
2297#endif
2399 */
2400void
2401biodone(register struct buf * bp)
2402{
2403 int s;
2404
2405 s = splbio();
2406

--- 56 unchanged lines hidden (view full) ---

2463 }
2464#endif
2465#if defined(VFS_BIO_DEBUG)
2466 if (obj->paging_in_progress < bp->b_npages) {
2467 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2468 obj->paging_in_progress, bp->b_npages);
2469 }
2470#endif
2298 iosize = bp->b_bufsize;
2471
2472 /*
2473 * Set B_CACHE if the op was a normal read and no error
2474 * occurred. B_CACHE is set for writes in the b*write()
2475 * routines.
2476 */
2477 iosize = bp->b_bcount;
2478 if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
2479 bp->b_flags |= B_CACHE;
2480 }
2481
2299 for (i = 0; i < bp->b_npages; i++) {
2300 int bogusflag = 0;
2301 m = bp->b_pages[i];
2302 if (m == bogus_page) {
2303 bogusflag = 1;
2304 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2305 if (!m) {
2306#if defined(VFS_BIO_DEBUG)
2307 printf("biodone: page disappeared\n");
2308#endif
2309 vm_object_pip_subtract(obj, 1);
2482 for (i = 0; i < bp->b_npages; i++) {
2483 int bogusflag = 0;
2484 m = bp->b_pages[i];
2485 if (m == bogus_page) {
2486 bogusflag = 1;
2487 m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2488 if (!m) {
2489#if defined(VFS_BIO_DEBUG)
2490 printf("biodone: page disappeared\n");
2491#endif
2492 vm_object_pip_subtract(obj, 1);
2493 bp->b_flags &= ~B_CACHE;
2310 continue;
2311 }
2312 bp->b_pages[i] = m;
2313 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2314 }
2315#if defined(VFS_BIO_DEBUG)
2316 if (OFF_TO_IDX(foff) != m->pindex) {
2317 printf(
2318"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2319 (unsigned long)foff, m->pindex);
2320 }
2321#endif
2322 resid = IDX_TO_OFF(m->pindex + 1) - foff;
2323 if (resid > iosize)
2324 resid = iosize;
2325
2326 /*
2327 * In the write case, the valid and clean bits are
2494 continue;
2495 }
2496 bp->b_pages[i] = m;
2497 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2498 }
2499#if defined(VFS_BIO_DEBUG)
2500 if (OFF_TO_IDX(foff) != m->pindex) {
2501 printf(
2502"biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2503 (unsigned long)foff, m->pindex);
2504 }
2505#endif
2506 resid = IDX_TO_OFF(m->pindex + 1) - foff;
2507 if (resid > iosize)
2508 resid = iosize;
2509
2510 /*
2511 * In the write case, the valid and clean bits are
2328 * already changed correctly, so we only need to do this
2329 * here in the read case.
2512 * already changed correctly ( see bdwrite() ), so we
2513 * only need to do this here in the read case.
2330 */
2331 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2332 vfs_page_set_valid(bp, foff, i, m);
2333 }
2334 vm_page_flag_clear(m, PG_ZERO);
2335
2336 /*
2337 * when debugging new filesystems or buffer I/O methods, this

--- 110 unchanged lines hidden (view full) ---

2448 vm_page_flag_clear(m, PG_ZERO);
2449 vm_page_io_finish(m);
2450 }
2451 vm_object_pip_wakeupn(obj, 0);
2452 }
2453}
2454
2455/*
2514 */
2515 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2516 vfs_page_set_valid(bp, foff, i, m);
2517 }
2518 vm_page_flag_clear(m, PG_ZERO);
2519
2520 /*
2521 * when debugging new filesystems or buffer I/O methods, this

--- 110 unchanged lines hidden (view full) ---

2632 vm_page_flag_clear(m, PG_ZERO);
2633 vm_page_io_finish(m);
2634 }
2635 vm_object_pip_wakeupn(obj, 0);
2636 }
2637}
2638
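/*
 * Illustrative sketch of how a completion path hands a buffer to
 * biodone().  The handler name and error argument are hypothetical; only
 * the flag handling reflects the interface described above.
 */
static void
example_io_complete(struct buf *bp, int error)
{
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
		bp->b_resid = bp->b_bcount;	/* nothing was transferred */
	} else {
		bp->b_resid = 0;
	}
	/*
	 * biodone() sets B_DONE, may set B_CACHE for a successful VMIO
	 * read, updates the backing pages, and wakes any biowait()er.
	 */
	biodone(bp);
}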
2639/*
2456 * Set NFS' b_validoff and b_validend fields from the valid bits
2457 * of a page. If the consumer is not NFS, and the page is not
2458 * valid for the entire range, clear the B_CACHE flag to force
2459 * the consumer to re-read the page.
2640 * vfs_page_set_valid:
2460 *
2641 *
2461 * B_CACHE interaction is especially tricky.
2462 */
2463static void
2464vfs_buf_set_valid(struct buf *bp,
2465 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
2466 vm_page_t m)
2467{
2468 if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
2469 vm_offset_t svalid, evalid;
2470 int validbits = m->valid >> (((foff+off)&PAGE_MASK)/DEV_BSIZE);
2471
2472 /*
2473 * This only bothers with the first valid range in the
2474 * page.
2475 */
2476 svalid = off;
2477 while (validbits && !(validbits & 1)) {
2478 svalid += DEV_BSIZE;
2479 validbits >>= 1;
2480 }
2481 evalid = svalid;
2482 while (validbits & 1) {
2483 evalid += DEV_BSIZE;
2484 validbits >>= 1;
2485 }
2486 evalid = min(evalid, off + size);
2487 /*
2488 * We can only set b_validoff/end if this range is contiguous
2489 * with the range built up already. If we cannot set
2490 * b_validoff/end, we must clear B_CACHE to force an update
2491 * to clean the bp up.
2492 */
2493 if (svalid == bp->b_validend) {
2494 bp->b_validoff = min(bp->b_validoff, svalid);
2495 bp->b_validend = max(bp->b_validend, evalid);
2496 } else {
2497 bp->b_flags &= ~B_CACHE;
2498 }
2499 } else if (!vm_page_is_valid(m,
2500 (vm_offset_t) ((foff + off) & PAGE_MASK),
2501 size)) {
2502 bp->b_flags &= ~B_CACHE;
2503 }
2504}
2505
2506/*
2507 * Set the valid bits in a page, taking care of the b_validoff,
2508 * b_validend fields which NFS uses to optimise small reads. Off is
2509 * the offset within the file and pageno is the page index within the buf.
2642 * Set the valid bits in a page based on the supplied offset. The
2643 * range is restricted to the buffer's size.
2510 *
2644 *
2511 * XXX we have to set the valid & clean bits for all page fragments
2512 * touched by b_validoff/validend, even if the page fragment goes somewhat
2513 * beyond b_validoff/validend due to alignment.
2645 * For NFS, the range is additionally restricted to b_validoff/end.
2646 * validoff/end must be DEV_BSIZE chunky or the end must be at the
2647 * file EOF. If a dirty range exists, set the page's dirty bits
2648 * inclusively.
2649 *
2650 * This routine is typically called after a read completes.
2514 */
2515static void
2516vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2517{
2651 */
2652static void
2653vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2654{
2518 struct vnode *vp = bp->b_vp;
2519 vm_ooffset_t soff, eoff;
2520
2521 /*
2522 * Start and end offsets in buffer. eoff - soff may not cross a
2655 vm_ooffset_t soff, eoff;
2656
2657 /*
2658 * Start and end offsets in buffer. eoff - soff may not cross a
2523 * page boundary or cross the end of the buffer.
2659 * page boundary or cross the end of the buffer. The end of the
2660 * buffer, in this case, is our file EOF, not the allocation size
2661 * of the buffer.
2524 */
2525 soff = off;
2526 eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2662 */
2663 soff = off;
2664 eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2527 if (eoff > bp->b_offset + bp->b_bufsize)
2528 eoff = bp->b_offset + bp->b_bufsize;
2665 if (eoff > bp->b_offset + bp->b_bcount)
2666 eoff = bp->b_offset + bp->b_bcount;
2529
2667
2530 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
2531 vm_ooffset_t sv, ev;
2532 vm_page_set_invalid(m,
2533 (vm_offset_t) (soff & PAGE_MASK),
2534 (vm_offset_t) (eoff - soff));
2535 /*
2536 * bp->b_validoff and bp->b_validend restrict the valid range
2537 * that we can set. Note that these offsets are not DEV_BSIZE
2538 * aligned. vm_page_set_validclean() must know what
2539 * sub-DEV_BSIZE ranges to clear.
2540 */
2541#if 0
2542 sv = (bp->b_offset + bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2543 ev = (bp->b_offset + bp->b_validend + (DEV_BSIZE - 1)) &
2544 ~(DEV_BSIZE - 1);
2545#endif
2546 sv = bp->b_offset + bp->b_validoff;
2547 ev = bp->b_offset + bp->b_validend;
2548 soff = qmax(sv, soff);
2549 eoff = qmin(ev, eoff);
2668 /*
2669 * Set valid range. This is typically the entire buffer and thus the
2670 * entire page.
2671 */
2672 if (eoff > soff) {
2673 vm_page_set_validclean(
2674 m,
2675 (vm_offset_t) (soff & PAGE_MASK),
2676 (vm_offset_t) (eoff - soff)
2677 );
2550 }
2678 }
2551
2552 if (eoff > soff)
2553 vm_page_set_validclean(m,
2554 (vm_offset_t) (soff & PAGE_MASK),
2555 (vm_offset_t) (eoff - soff));
2556}
2557
2558/*
2559 * This routine is called before a device strategy routine.
2560 * It is used to tell the VM system that paging I/O is in
2561 * progress, and treat the pages associated with the buffer
2562 * almost as being PG_BUSY. Also the object paging_in_progress
2563 * flag is handled to make sure that the object doesn't become
2564 * inconsistent.
2679}
2680
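/*
 * Illustrative sketch (hypothetical numbers, PAGE_SIZE assumed 4096) of
 * the soff/eoff clipping in vfs_page_set_valid() above: take
 * b_offset = 0x1200 and b_bcount = 0x1c00.
 *
 *	page 0: soff = 0x1200, eoff = 0x2000 (next page boundary), so
 *	        bytes 0x200-0xfff of the page are marked valid and clean.
 *	page 1: soff = 0x2000, eoff clipped to b_offset + b_bcount = 0x2e00,
 *	        so bytes 0x000-0xdff are marked -- the clip is against
 *	        b_bcount (the file data), not b_bufsize (the allocation).
 */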
2681/*
2682 * This routine is called before a device strategy routine.
2683 * It is used to tell the VM system that paging I/O is in
2684 * progress, and treat the pages associated with the buffer
2685 * almost as being PG_BUSY. Also the object paging_in_progress
2686 * flag is handled to make sure that the object doesn't become
2687 * inconsistent.
2688 *
2689 * Since I/O has not been initiated yet, certain buffer flags
2690 * such as B_ERROR or B_INVAL may be in an inconsistent state
2691 * and should be ignored.
2565 */
2566void
2567vfs_busy_pages(struct buf * bp, int clear_modify)
2568{
2569 int i, bogus;
2570
2571 if (bp->b_flags & B_VMIO) {
2572 struct vnode *vp = bp->b_vp;

--- 17 unchanged lines hidden (view full) ---

2590 vm_page_t m = bp->b_pages[i];
2591
2592 vm_page_flag_clear(m, PG_ZERO);
2593 if ((bp->b_flags & B_CLUSTER) == 0) {
2594 vm_object_pip_add(obj, 1);
2595 vm_page_io_start(m);
2596 }
2597
2692 */
2693void
2694vfs_busy_pages(struct buf * bp, int clear_modify)
2695{
2696 int i, bogus;
2697
2698 if (bp->b_flags & B_VMIO) {
2699 struct vnode *vp = bp->b_vp;

--- 17 unchanged lines hidden (view full) ---

2717 vm_page_t m = bp->b_pages[i];
2718
2719 vm_page_flag_clear(m, PG_ZERO);
2720 if ((bp->b_flags & B_CLUSTER) == 0) {
2721 vm_object_pip_add(obj, 1);
2722 vm_page_io_start(m);
2723 }
2724
2725 /*
2726 * When readying a buffer for a read ( i.e.
2727 * clear_modify == 0 ), it is important to do
2728 * bogus_page replacement for valid pages in
2729 * partially instantiated buffers. Partially
2730 * instantiated buffers can, in turn, occur when
2731 * reconstituting a buffer from its VM backing store
2732 * base. We only have to do this if B_CACHE is
2733 * clear ( which causes the I/O to occur in the
2734 * first place ). The replacement prevents the read
2735 * I/O from overwriting potentially dirty VM-backed
2736 * pages. XXX bogus page replacement is, uh, bogus.
2737 * It may not work properly with small-block devices.
2738 * We need to find a better way.
2739 */
2740
2598 vm_page_protect(m, VM_PROT_NONE);
2599 if (clear_modify)
2600 vfs_page_set_valid(bp, foff, i, m);
2601 else if (m->valid == VM_PAGE_BITS_ALL &&
2602 (bp->b_flags & B_CACHE) == 0) {
2603 bp->b_pages[i] = bogus_page;
2604 bogus++;
2605 }
2606 foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2607 }
2608 if (bogus)
2609 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2610 }
2611}
2612
2613/*
2614 * Tell the VM system that the pages associated with this buffer
2615 * are clean. This is used for delayed writes where the data is
2616 * going to go to disk eventually without additional VM intervention.
2741 vm_page_protect(m, VM_PROT_NONE);
2742 if (clear_modify)
2743 vfs_page_set_valid(bp, foff, i, m);
2744 else if (m->valid == VM_PAGE_BITS_ALL &&
2745 (bp->b_flags & B_CACHE) == 0) {
2746 bp->b_pages[i] = bogus_page;
2747 bogus++;
2748 }
2749 foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2750 }
2751 if (bogus)
2752 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2753 }
2754}
2755
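/*
 * Illustrative sketch of the two vfs_busy_pages() call sites, which
 * differ only in clear_modify: a write path (compare bwrite()) marks the
 * pages valid and clean up front, while a read path passes 0 so that
 * fully valid pages get the bogus_page substitution instead.  The
 * function name is hypothetical and the bookkeeping is condensed.
 */
static void
example_start_write(struct vnode *vp, struct buf *bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
	vfs_busy_pages(bp, 1);		/* clear_modify != 0: write */
	VOP_STRATEGY(vp, bp);
}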
2756/*
2757 * Tell the VM system that the pages associated with this buffer
2758 * are clean. This is used for delayed writes where the data is
2759 * going to go to disk eventually without additional VM intervention.
2760 *
2761 * Note that while we only really need to clean through to b_bcount, we
2762 * just go ahead and clean through to b_bufsize.
2617 */
2763 */
2618void
2764static void
2619vfs_clean_pages(struct buf * bp)
2620{
2621 int i;
2622
2623 if (bp->b_flags & B_VMIO) {
2624 vm_ooffset_t foff;
2765vfs_clean_pages(struct buf * bp)
2766{
2767 int i;
2768
2769 if (bp->b_flags & B_VMIO) {
2770 vm_ooffset_t foff;
2771
2625 foff = bp->b_offset;
2626 KASSERT(bp->b_offset != NOOFFSET,
2627 ("vfs_clean_pages: no buffer offset"));
2628 for (i = 0; i < bp->b_npages; i++) {
2629 vm_page_t m = bp->b_pages[i];
2772 foff = bp->b_offset;
2773 KASSERT(bp->b_offset != NOOFFSET,
2774 ("vfs_clean_pages: no buffer offset"));
2775 for (i = 0; i < bp->b_npages; i++) {
2776 vm_page_t m = bp->b_pages[i];
2777 vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2778 vm_ooffset_t eoff = noff;
2779
2780 if (eoff > bp->b_offset + bp->b_bufsize)
2781 eoff = bp->b_offset + bp->b_bufsize;
2630 vfs_page_set_valid(bp, foff, i, m);
2782 vfs_page_set_valid(bp, foff, i, m);
2631 foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2783 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2784 foff = noff;
2632 }
2633 }
2634}
2635
2785 }
2786 }
2787}
2788
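/*
 * Illustrative sketch of the delayed-write path that relies on
 * vfs_clean_pages().  Simplified from bdwrite(); the dirty-buffer
 * accounting, bmap, and reassignment steps are omitted and the function
 * name is hypothetical.
 */
static void
example_delayed_write(struct buf *bp)
{
	bp->b_flags |= B_DELWRI | B_DONE;	/* data reaches disk later */
	/*
	 * Tell the VM system the pages are valid and clean now, so the
	 * pageout daemon need not launder them while the dirty buffer
	 * is still queued.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
}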
2789/*
2790 * vfs_bio_set_validclean:
2791 *
2792 * Set the range within the buffer to valid and clean. The range is
2793 * relative to the beginning of the buffer, b_offset. Note that b_offset
2794 * itself may be offset from the beginning of the first page.
2795 */
2796
2797void
2798vfs_bio_set_validclean(struct buf *bp, int base, int size)
2799{
2800 if (bp->b_flags & B_VMIO) {
2801 int i;
2802 int n;
2803
2804 /*
2805 * Fixup base to be relative to beginning of first page.
2806 * Set initial n to be the maximum number of bytes in the
2807 * first page that can be validated.
2808 */
2809
2810 base += (bp->b_offset & PAGE_MASK);
2811 n = PAGE_SIZE - (base & PAGE_MASK);
2812
2813 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
2814 vm_page_t m = bp->b_pages[i];
2815
2816 if (n > size)
2817 n = size;
2818
2819 vm_page_set_validclean(m, base & PAGE_MASK, n);
2820 base += n;
2821 size -= n;
2822 n = PAGE_SIZE;
2823 }
2824 }
2825}
2826
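/*
 * Illustrative sketch (hypothetical numbers, PAGE_SIZE assumed 4096) of
 * the base/n fixup in vfs_bio_set_validclean() above: take
 * b_offset = 0x1200, base = 0x100, size = 0x2000.
 *
 *	base += b_offset & PAGE_MASK            -> base = 0x300
 *	page 0: n = 4096 - 0x300 = 0xd00        -> validate 0x300-0xfff
 *	page 1: n = PAGE_SIZE, 0x1300 remaining -> validate the whole page
 *	page 2: 0x300 remaining                 -> validate 0x000-0x2ff
 */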
2827/*
2828 * vfs_bio_clrbuf:
2829 *
2830 * clear a buffer. This routine essentially fakes an I/O, so we need
2831 * to clear B_ERROR and B_INVAL.
2832 *
2833 * Note that while we only theoretically need to clear through b_bcount,
2834 * we go ahead and clear through b_bufsize.
2835 */
2836
2636void
2637vfs_bio_clrbuf(struct buf *bp) {
2638 int i, mask = 0;
2639 caddr_t sa, ea;
2640 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2837void
2838vfs_bio_clrbuf(struct buf *bp) {
2839 int i, mask = 0;
2840 caddr_t sa, ea;
2841 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2842 bp->b_flags &= ~(B_INVAL|B_ERROR);
2641 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2642 (bp->b_offset & PAGE_MASK) == 0) {
2643 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2644 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2645 ((bp->b_pages[0]->valid & mask) != mask)) {
2646 bzero(bp->b_data, bp->b_bufsize);
2647 }
2648 bp->b_pages[0]->valid |= mask;

--- 137 unchanged lines hidden ---
2843 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2844 (bp->b_offset & PAGE_MASK) == 0) {
2845 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2846 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2847 ((bp->b_pages[0]->valid & mask) != mask)) {
2848 bzero(bp->b_data, bp->b_bufsize);
2849 }
2850 bp->b_pages[0]->valid |= mask;

--- 137 unchanged lines hidden ---