vfs_bio.c (26599) vs. vfs_bio.c (26664)
1/*
2 * Copyright (c) 1994 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 4 unchanged lines hidden ---

13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. This work was done expressly for inclusion into FreeBSD. Other use
17 * is allowed if this notation is included.
18 * 5. Modifications may be freely made to this file if the above conditions
19 * are met.
20 *
21 * $Id: vfs_bio.c,v 1.119 1997/06/06 09:04:28 dfr Exp $
21 * $Id: vfs_bio.c,v 1.120 1997/06/13 08:30:40 bde Exp $
22 */
23
24/*
25 * this file contains a new buffer I/O scheme implementing a coherent
26 * VM object and buffer cache scheme. Pains have been taken to make
27 * sure that the performance degradation associated with schemes such
28 * as this is not realized.
29 *

--- 50 unchanged lines hidden ---

80static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
81 vm_offset_t off, vm_offset_t size,
82 vm_page_t m);
83static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
84 int pageno, vm_page_t m);
85static void vfs_clean_pages(struct buf * bp);
86static void vfs_setdirty(struct buf *bp);
87static void vfs_vmio_release(struct buf *bp);
88static void flushdirtybuffers(int slpflag, int slptimeo);
88
89int needsbuffer;
90
91/*
92 * Internal update daemon, process 3
93 * The variable vfs_update_wakeup allows for internal syncs.
94 */
95int vfs_update_wakeup;

--- 10 unchanged lines hidden ---

106 * for input in the case of buffers partially already in memory,
107 * but the code is intricate enough already.
108 */
109vm_page_t bogus_page;
110static vm_offset_t bogus_offset;
111
112static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
113 bufmallocspace, maxbufmallocspace;
115int numdirtybuffers, lodirtybuffers, hidirtybuffers;
116static int numfreebuffers, lofreebuffers, hifreebuffers;
114
117
118SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
119 &numdirtybuffers, 0, "");
120SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
121 &lodirtybuffers, 0, "");
122SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
123 &hidirtybuffers, 0, "");
124SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
125 &numfreebuffers, 0, "");
126SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
127 &lofreebuffers, 0, "");
128SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
129 &hifreebuffers, 0, "");
130SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
131 &maxbufspace, 0, "");
132SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
133 &bufspace, 0, "");
134SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
135 &maxvmiobufspace, 0, "");
136SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
137 &vmiospace, 0, "");
138SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
139 &maxbufmallocspace, 0, "");
140SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
141 &bufmallocspace, 0, "");
142
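The sysctl knobs above expose the new accounting: the RD entries are read-only counters, the RW entries let the watermarks be tuned at runtime. A minimal userland sketch for reading one of them, assuming only the vfs.* names registered above:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        int dirty;
        size_t len = sizeof(dirty);

        /* vfs.numdirtybuffers is the read-only counter exported above */
        if (sysctlbyname("vfs.numdirtybuffers", &dirty, &len, NULL, 0) == -1) {
            perror("sysctlbyname");
            return (1);
        }
        printf("numdirtybuffers: %d\n", dirty);
        return (0);
    }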
115static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
116static struct bqueues bufqueues[BUFFER_QUEUES];
117
118extern int vm_swap_size;
119
120#define BUF_MAXUSE 16
148#define BUF_MAXUSE 24
121
150#define VFS_BIO_NEED_ANY 1
151#define VFS_BIO_NEED_LOWLIMIT 2
152#define VFS_BIO_NEED_FREE 4
153
122/*
123 * Initialize buffer headers and related structures.
124 */
125void
126bufinit()
127{
128 struct buf *bp;
129 int i;

--- 37 unchanged lines hidden ---

167 * Limit the amount of malloc memory since it is wired permanently into
168 * the kernel space. Even though this is accounted for in the buffer
169 * allocation, we don't want the malloced region to grow uncontrolled.
170 * The malloc scheme improves memory utilization significantly on average
171 * (small) directories.
172 */
173 maxbufmallocspace = maxbufspace / 20;
174
207/*
208 * Remove the probability of deadlock conditions by limiting the
209 * number of dirty buffers.
210 */
211 hidirtybuffers = nbuf / 6 + 20;
212 lodirtybuffers = nbuf / 12 + 10;
213 numdirtybuffers = 0;
214 lofreebuffers = nbuf / 18 + 5;
215 hifreebuffers = 2 * lofreebuffers;
216 numfreebuffers = nbuf;
217
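For illustration, with nbuf = 1024 the formulas above give hidirtybuffers = 1024/6 + 20 = 190 and lodirtybuffers = 1024/12 + 10 = 95, with lofreebuffers = 1024/18 + 5 = 61 and hifreebuffers = 122: flushing starts when roughly a fifth of the buffers are dirty and stops once the count is back under a tenth.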
175 bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
176 bogus_page = vm_page_alloc(kernel_object,
177 ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
178 VM_ALLOC_NORMAL);
179
180}
181
182/*

--- 22 unchanged lines hidden ---

205bremfree(struct buf * bp)
206{
207 int s = splbio();
208
209 if (bp->b_qindex != QUEUE_NONE) {
210 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
211 bp->b_qindex = QUEUE_NONE;
212 } else {
256#if !defined(MAX_PERF)
213 panic("bremfree: removing a buffer when not on a queue");
258#endif
214 }
260 if ((bp->b_flags & B_INVAL) ||
261 (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
262 --numfreebuffers;
215 splx(s);
216}
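The accounting added to bremfree() (and matched in brelse()/bqrelse() below) counts a buffer toward numfreebuffers when it is invalid, or when it is neither delayed-write nor locked. That predicate, restated as a standalone helper with a hypothetical name:

    /* Hypothetical helper: the flag test behind the new
     * numfreebuffers bookkeeping. */
    static int
    counts_as_free(long flags)
    {
        return ((flags & B_INVAL) ||
            (flags & (B_DELWRI | B_LOCKED)) == 0);
    }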
217
218/*
219 * Get a buffer with the specified data. Look in the cache first.
220 */
221int
222bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,

--- 88 unchanged lines hidden ---

311bwrite(struct buf * bp)
312{
313 int oldflags = bp->b_flags;
314
315 if (bp->b_flags & B_INVAL) {
316 brelse(bp);
317 return (0);
318 }
367#if !defined(MAX_PERF)
319 if (!(bp->b_flags & B_BUSY))
320 panic("bwrite: buffer is not busy???");
370#endif
321
322 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
323 bp->b_flags |= B_WRITEINPROG;
324
325 if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
375 if ((oldflags & B_DELWRI) == B_DELWRI) {
376 --numdirtybuffers;
326 reassignbuf(bp, bp->b_vp);
327 }
328
329 bp->b_vp->v_numoutput++;
330 vfs_busy_pages(bp, 1);
331 if (curproc != NULL)
332 curproc->p_stats->p_ru.ru_oublock++;
333 VOP_STRATEGY(bp);

--- 46 unchanged lines hidden ---

380
381int
382vn_bwrite(ap)
383 struct vop_bwrite_args *ap;
384{
385 return (bwrite(ap->a_bp));
386}
387
439void
440vfs_bio_need_satisfy(void) {
441 ++numfreebuffers;
442 if (!needsbuffer)
443 return;
444 if (numdirtybuffers < lodirtybuffers) {
445 needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
446 } else {
447 needsbuffer &= ~VFS_BIO_NEED_ANY;
448 }
449 if (numfreebuffers >= hifreebuffers) {
450 needsbuffer &= ~VFS_BIO_NEED_FREE;
451 }
452 wakeup(&needsbuffer);
453}
454
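needsbuffer is now a bitmask of waiter classes (the VFS_BIO_NEED_* flags above) rather than a boolean: a sleeper sets the bit for the condition it needs and re-checks that bit after every wakeup, while vfs_bio_need_satisfy() clears only the bits that are actually satisfiable before calling wakeup(). A minimal sketch of the sleeper side, mirroring the loops in getnewbuf() and waitfreebuffers() below:

    /* Sketch only: how a waiter uses the needsbuffer bitmask. */
    needsbuffer |= VFS_BIO_NEED_FREE;          /* declare what we wait for */
    while (needsbuffer & VFS_BIO_NEED_FREE) {  /* re-check after wakeup    */
        if (tsleep(&needsbuffer, PRIBIO, "biofre", 0))
            break;                             /* timeout or signal        */
    }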
388/*
389 * Delayed write. (Buffer is marked dirty).
390 */
391void
392bdwrite(struct buf * bp)
393{
394
462#if !defined(MAX_PERF)
395 if ((bp->b_flags & B_BUSY) == 0) {
396 panic("bdwrite: buffer is not busy");
397 }
466#endif
467
398 if (bp->b_flags & B_INVAL) {
399 brelse(bp);
400 return;
401 }
402 if (bp->b_flags & B_TAPE) {
403 bawrite(bp);
404 return;
405 }
406 bp->b_flags &= ~(B_READ|B_RELBUF);
407 if ((bp->b_flags & B_DELWRI) == 0) {
408 bp->b_flags |= B_DONE | B_DELWRI;
409 reassignbuf(bp, bp->b_vp);
480 ++numdirtybuffers;
410 }
411
412 /*
413 * This bmap keeps the system from needing to do the bmap later,
414 * perhaps when the system is attempting to do a sync. Since it
415 * is likely that the indirect block -- or whatever other data structure
416 * that the filesystem needs is still in memory now, it is a good
417 * thing to do this. Note also, that if the pageout daemon is

--- 13 unchanged lines hidden ---

431 * We need to do this here to satisfy the vnode_pager and the
432 * pageout daemon, so that it thinks that the pages have been
433 * "cleaned". Note that since the pages are in a delayed write
434 * buffer -- the VFS layer "will" see that the pages get written
435 * out on the next sync, or perhaps the cluster will be completed.
436 */
437 vfs_clean_pages(bp);
438 bqrelse(bp);
510
511 if (numdirtybuffers >= hidirtybuffers)
512 flushdirtybuffers(0, 0);
513
439 return;
440}
441
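bdwrite() now trips a flush once the dirty count reaches the high watermark, and flushdirtybuffers() (added further below) drains it back down to the low one, giving hysteresis instead of thrashing at a single limit. A self-contained userland illustration with made-up counts:

    #include <stdio.h>

    /* Standalone sketch of the high/low watermark behavior wired up in
     * bdwrite()/flushdirtybuffers(); the numbers are illustrative only. */
    int
    main(void)
    {
        int nbuf = 1024;
        int hi = nbuf / 6 + 20;         /* 190: start flushing here */
        int lo = nbuf / 12 + 10;        /*  95: stop flushing here  */
        int dirty = 0, i;

        for (i = 0; i < 400; i++) {
            dirty++;                    /* bdwrite(): ++numdirtybuffers   */
            if (dirty >= hi) {
                while (dirty > lo)      /* flushdirtybuffers() drains,    */
                    dirty--;            /* one vfs_bio_awrite() at a time */
                printf("flush at write %d, down to %d dirty\n", i, dirty);
            }
        }
        return (0);
    }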
442/*
443 * Asynchronous write.
444 * Start output on a buffer, but do not wait for it to complete.
445 * The buffer is released when the output completes.
446 */

--- 41 unchanged lines hidden ---

488 }
489
490 if (bp->b_flags & B_LOCKED)
491 bp->b_flags &= ~B_ERROR;
492
493 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
494 (bp->b_bufsize <= 0)) {
495 bp->b_flags |= B_INVAL;
571 if (bp->b_flags & B_DELWRI)
572 --numdirtybuffers;
496 bp->b_flags &= ~(B_DELWRI | B_CACHE);
497 if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
498 if (bp->b_bufsize)
499 allocbuf(bp, 0);
500 brelvp(bp);
501 }
502 }
503

--- 24 unchanged lines hidden ---

528 vm_ooffset_t foff;
529 vm_object_t obj;
530 int i, resid;
531 vm_page_t m;
532 struct vnode *vp;
533 int iototal = bp->b_bufsize;
534
535 vp = bp->b_vp;
613
614#if !defined(MAX_PERF)
536 if (!vp)
537 panic("brelse: missing vp");
617#endif
538
539 if (bp->b_npages) {
540 vm_pindex_t poff;
541 obj = (vm_object_t) vp->v_object;
542 if (vp->v_type == VBLK)
543 foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
544 else
545 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
546 poff = OFF_TO_IDX(foff);
547 for (i = 0; i < bp->b_npages; i++) {
548 m = bp->b_pages[i];
549 if (m == bogus_page) {
550 m = vm_page_lookup(obj, poff + i);
631#if !defined(MAX_PERF)
551 if (!m) {
552 panic("brelse: page missing\n");
553 }
635#endif
554 bp->b_pages[i] = m;
555 pmap_qenter(trunc_page(bp->b_data),
556 bp->b_pages, bp->b_npages);
557 }
558 resid = IDX_TO_OFF(m->pindex+1) - foff;
559 if (resid > iototal)
560 resid = iototal;
561 if (resid > 0) {

--- 23 unchanged lines hidden ---

585 }
586 foff += resid;
587 iototal -= resid;
588 }
589 }
590 if (bp->b_flags & (B_INVAL | B_RELBUF))
591 vfs_vmio_release(bp);
592 }
675#if !defined(MAX_PERF)
593 if (bp->b_qindex != QUEUE_NONE)
594 panic("brelse: free buffer onto another queue???");
678#endif
595
596 /* enqueue */
597 /* buffers with no memory */
598 if (bp->b_bufsize == 0) {
683 bp->b_flags |= B_INVAL;
599 bp->b_qindex = QUEUE_EMPTY;
600 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
601 LIST_REMOVE(bp, b_hash);
602 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
603 bp->b_dev = NODEV;
604 /*
605 * Get rid of the kva allocation *now*
606 */
607 bfreekva(bp);
608 if (needsbuffer) {
609 wakeup(&needsbuffer);
610 needsbuffer=0;
611 }
612 /* buffers with junk contents */
613 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
696 bp->b_flags |= B_INVAL;
614 bp->b_qindex = QUEUE_AGE;
615 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
616 LIST_REMOVE(bp, b_hash);
617 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
618 bp->b_dev = NODEV;
619 if (needsbuffer) {
620 wakeup(&needsbuffer);
621 needsbuffer=0;
622 }
623 /* buffers that are locked */
624 } else if (bp->b_flags & B_LOCKED) {
625 bp->b_qindex = QUEUE_LOCKED;
626 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
627 /* buffers with stale but valid contents */
628 } else if (bp->b_flags & B_AGE) {
629 bp->b_qindex = QUEUE_AGE;
630 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
631 if (needsbuffer) {
632 wakeup(&needsbuffer);
633 needsbuffer=0;
634 }
635 /* buffers with valid and quite potentially reusable contents */
636 } else {
637 bp->b_qindex = QUEUE_LRU;
638 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
639 if (needsbuffer) {
640 wakeup(&needsbuffer);
641 needsbuffer=0;
717 }
718
719 if ((bp->b_flags & B_INVAL) ||
720 (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
721 if (bp->b_flags & B_DELWRI) {
722 --numdirtybuffers;
723 bp->b_flags &= ~B_DELWRI;
642 }
724 }
725 vfs_bio_need_satisfy();
643 }
644
645 /* unlock */
646 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
647 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
648 splx(s);
649}
650
651/*
652 * Release a buffer.
653 */
654void
655bqrelse(struct buf * bp)
656{
657 int s;
658
659 s = splbio();
660
661
662 /* anyone need this block? */
663 if (bp->b_flags & B_WANTED) {
664 bp->b_flags &= ~(B_WANTED | B_AGE);
665 wakeup(bp);
666 }
667
750#if !defined(MAX_PERF)
668 if (bp->b_qindex != QUEUE_NONE)
669 panic("bqrelse: free buffer onto another queue???");
753#endif
670
671 if (bp->b_flags & B_LOCKED) {
672 bp->b_flags &= ~B_ERROR;
673 bp->b_qindex = QUEUE_LOCKED;
674 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
675 /* buffers with stale but valid contents */
676 } else {
677 bp->b_qindex = QUEUE_LRU;
678 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
679 if (needsbuffer) {
680 wakeup(&needsbuffer);
681 needsbuffer=0;
682 }
683 }
684
765 if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
766 vfs_bio_need_satisfy();
767 }
768
685 /* unlock */
686 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
687 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
688 splx(s);
689}
690
691static void
692vfs_vmio_release(bp)

--- 156 unchanged lines hidden ---

849 return nwritten;
850}
851
852
853/*
854 * Find a buffer header which is available for use.
855 */
856static struct buf *
857getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
941getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize)
858{
859 struct buf *bp;
860 int nbyteswritten = 0;
861 vm_offset_t addr;
946 static int writerecursion = 0;
862
863start:
864 if (bufspace >= maxbufspace)
865 goto trytofreespace;
866
867 /* can we constitute a new buffer? */
868 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
954#if !defined(MAX_PERF)
869 if (bp->b_qindex != QUEUE_EMPTY)
870 panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
871 bp->b_qindex);
958#endif
872 bp->b_flags |= B_BUSY;
873 bremfree(bp);
874 goto fillbuf;
875 }
876trytofreespace:
877 /*
878 * We keep the file I/O from hogging metadata I/O
879 * This is desirable because file data is cached in the
880 * VM/Buffer cache even if a buffer is freed.
881 */
882 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
970#if !defined(MAX_PERF)
883 if (bp->b_qindex != QUEUE_AGE)
884 panic("getnewbuf: inconsistent AGE queue, qindex=%d",
885 bp->b_qindex);
974#endif
886 } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
976#if !defined(MAX_PERF)
887 if (bp->b_qindex != QUEUE_LRU)
888 panic("getnewbuf: inconsistent LRU queue, qindex=%d",
889 bp->b_qindex);
980#endif
890 }
891 if (!bp) {
892 /* wait for a free buffer of any kind */
893 needsbuffer = 1;
984 needsbuffer |= VFS_BIO_NEED_ANY;
894 do
895 tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
896 slptimeo);
897 while (needsbuffer);
988 while (needsbuffer & VFS_BIO_NEED_ANY);
898 return (0);
899 }
900
901#if defined(DIAGNOSTIC)
902 if (bp->b_flags & B_BUSY) {
903 panic("getnewbuf: busy buffer on free list\n");
904 }
905#endif

--- 11 unchanged lines hidden ---

917 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
918 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
919 goto start;
920 }
921 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
922 }
923 }
924
925 /* if we are a delayed write, convert to an async write */
926 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
927 nbyteswritten += vfs_bio_awrite(bp);
928 if (!slpflag && !slptimeo) {
929 return (0);
1019
1020 if (writerecursion > 0) {
1021 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1022 while (bp) {
1023 if ((bp->b_flags & B_DELWRI) == 0)
1024 break;
1025 bp = TAILQ_NEXT(bp, b_freelist);
1026 }
1027 if (bp == NULL) {
1028 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1029 while (bp) {
1030 if ((bp->b_flags & B_DELWRI) == 0)
1031 break;
1032 bp = TAILQ_NEXT(bp, b_freelist);
1033 }
1034 }
1035 if (bp == NULL)
1036 panic("getnewbuf: cannot get buffer, infinite recursion failure");
1037 } else {
1038 ++writerecursion;
1039 nbyteswritten += vfs_bio_awrite(bp);
1040 --writerecursion;
1041 if (!slpflag && !slptimeo) {
1042 return (0);
1043 }
1044 goto start;
930 }
1045 }
931 goto start;
932 }
933
934 if (bp->b_flags & B_WANTED) {
935 bp->b_flags &= ~B_WANTED;
936 wakeup(bp);
937 }
938 bremfree(bp);
939 bp->b_flags |= B_BUSY;

--- 76 unchanged lines hidden ---

1016 bp->b_kvabase = (caddr_t) addr;
1017 bp->b_kvasize = maxsize;
1018 }
1019 bp->b_data = bp->b_kvabase;
1020
1021 return (bp);
1022}
1023
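The writerecursion counter added to getnewbuf() above guards against re-entry: vfs_bio_awrite() may itself need a buffer, and without the guard a free list full of dirty buffers could recurse indefinitely. On re-entry the code instead scans the AGE and LRU queues for any clean buffer and panics only if none exists. The guard, reduced to a standalone pattern with hypothetical names:

    /* Sketch of the re-entrancy guard pattern (names hypothetical). */
    static int depth;
    static void flush_one(void);

    static void
    reclaim(void)
    {
        if (depth > 0)
            return;             /* re-entered: take the fallback path */
        ++depth;
        flush_one();            /* slow path that may re-enter reclaim() */
        --depth;
    }

    static void
    flush_one(void)
    {
        reclaim();              /* the recursion the counter absorbs */
    }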
1138static void
1139waitfreebuffers(int slpflag, int slptimeo) {
1140 while (numfreebuffers < hifreebuffers) {
1141 flushdirtybuffers(slpflag, slptimeo);
1142 if (numfreebuffers < hifreebuffers)
1143 break;
1144 needsbuffer |= VFS_BIO_NEED_FREE;
1145 if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
1146 break;
1147 }
1148}
1149
1150static void
1151flushdirtybuffers(int slpflag, int slptimeo) {
1152 int s;
1153 static pid_t flushing = 0;
1154
1155 s = splbio();
1156
1157 if (flushing) {
1158 if (flushing == curproc->p_pid) {
1159 splx(s);
1160 return;
1161 }
1162 while (flushing) {
1163 if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
1164 splx(s);
1165 return;
1166 }
1167 }
1168 }
1169 flushing = curproc->p_pid;
1170
1171 while (numdirtybuffers > lodirtybuffers) {
1172 struct buf *bp;
1173 needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
1174 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1175 if (bp == NULL)
1176 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1177
1178 while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
1179 bp = TAILQ_NEXT(bp, b_freelist);
1180 }
1181
1182 if (bp) {
1183 splx(s);
1184 vfs_bio_awrite(bp);
1185 s = splbio();
1186 continue;
1187 }
1188 break;
1189 }
1190
1191 flushing = 0;
1192 wakeup(&flushing);
1193 splx(s);
1194}
1195
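flushdirtybuffers() elects a single flusher: the static flushing pid doubles as an ownership token, a recursive call by the owner returns immediately, and any other caller sleeps on the token's address until the owner zeroes it and calls wakeup(). The convention, reduced to its skeleton with hypothetical names:

    /* Sketch of the single-owner convention (names hypothetical);
     * tsleep()/wakeup() behave as in the kernel proper. */
    static pid_t owner;

    void
    enter(pid_t me)
    {
        if (owner == me)
            return;                         /* recursive entry by owner */
        while (owner != 0)
            tsleep(&owner, PRIBIO, "biofls", 0);
        owner = me;
    }

    void
    leave(void)
    {
        owner = 0;
        wakeup(&owner);                     /* admit the next flusher */
    }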
1024/*
1025 * Check to see if a block is currently memory resident.
1026 */
1027struct buf *
1028incore(struct vnode * vp, daddr_t blkno)
1029{
1030 struct buf *bp;
1031

--- 104 unchanged lines hidden ---

1136 */
1137struct buf *
1138getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1139{
1140 struct buf *bp;
1141 int s;
1142 struct bufhashhdr *bh;
1143 int maxsize;
1316 static pid_t flushing = 0;
1144
1145 if (vp->v_mount) {
1146 maxsize = vp->v_mount->mnt_stat.f_iosize;
1147 /*
1148 * This happens on mount points.
1149 */
1150 if (maxsize < size)
1151 maxsize = size;
1152 } else {
1153 maxsize = size;
1154 }
1155
1329#if !defined(MAX_PERF)
1156 if (size > MAXBSIZE)
1157 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1332#endif
1158
1159 s = splbio();
1160loop:
1336 if (numfreebuffers < lofreebuffers) {
1337 waitfreebuffers(slpflag, slptimeo);
1338 }
1339
1161 if ((bp = gbincore(vp, blkno))) {
1162 if (bp->b_flags & B_BUSY) {
1163 bp->b_flags |= B_WANTED;
1164 if (bp->b_usecount < BUF_MAXUSE)
1165 ++bp->b_usecount;
1166 if (!tsleep(bp,
1167 (PRIBIO + 1) | slpflag, "getblk", slptimeo))
1168 goto loop;
1169
1170 splx(s);
1171 return (struct buf *) NULL;
1172 }
1173 bp->b_flags |= B_BUSY | B_CACHE;
1174 bremfree(bp);
1175
1176 /*
1177 * check for size inconsistencies (note that they shouldn't happen
1178 * but do when filesystems don't handle the size changes correctly.)
1179 * We are conservative on metadata and don't just extend the buffer
1180 * but write and re-constitute it.
1181 */
1182
1183 if (bp->b_bcount != size) {

--- 8 unchanged lines hidden ---

1192
1193 if (bp->b_usecount < BUF_MAXUSE)
1194 ++bp->b_usecount;
1195 splx(s);
1196 return (bp);
1197 } else {
1198 vm_object_t obj;
1199
1200 if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == 0) {
1379 if ((bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize)) == 0) {
1201 if (slpflag || slptimeo) {
1202 splx(s);
1203 return NULL;
1204 }
1205 goto loop;
1206 }
1207
1208 /*

--- 46 unchanged lines hidden ---

1255 */
1256struct buf *
1257geteblk(int size)
1258{
1259 struct buf *bp;
1260 int s;
1261
1262 s = splbio();
1263 while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
1442 while ((bp = getnewbuf(0, 0, 0, size, MAXBSIZE)) == 0);
1264 splx(s);
1265 allocbuf(bp, size);
1266 bp->b_flags |= B_INVAL;
1267 return (bp);
1268}
1269
1270
1271/*

--- 10 unchanged lines hidden ---

1282int
1283allocbuf(struct buf * bp, int size)
1284{
1285
1286 int s;
1287 int newbsize, mbsize;
1288 int i;
1289
1469#if !defined(MAX_PERF)
1290 if (!(bp->b_flags & B_BUSY))
1291 panic("allocbuf: buffer not busy");
1292
1293 if (bp->b_kvasize < size)
1294 panic("allocbuf: buffer too small");
1475#endif
1295
1296 if ((bp->b_flags & B_VMIO) == 0) {
1297 caddr_t origbuf;
1298 int origbufsize;
1299 /*
1300 * Just get anonymous memory from the kernel
1301 */
1302 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

--- 211 unchanged lines hidden ---

1514 bp->b_npages = curbpnpages;
1515 pmap_qenter((vm_offset_t) bp->b_data,
1516 bp->b_pages, bp->b_npages);
1517 ((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
1518 }
1519 }
1520 }
1521 if (bp->b_flags & B_VMIO)
1522 vmiospace += bp->b_bufsize;
1703 vmiospace += (newbsize - bp->b_bufsize);
1523 bufspace += (newbsize - bp->b_bufsize);
1524 bp->b_bufsize = newbsize;
1525 bp->b_bcount = size;
1526 return 1;
1527}
1528
1529/*
1530 * Wait for buffer I/O completion, returning error status.

--- 24 unchanged lines hidden (view full) ---

1555 * is not *a good idea*.
1556 */
1557void
1558biodone(register struct buf * bp)
1559{
1560 int s;
1561
1562 s = splbio();
1745#if !defined(MAX_PERF)
1563 if (!(bp->b_flags & B_BUSY))
1564 panic("biodone: buffer not busy");
1748#endif
1565
1566 if (bp->b_flags & B_DONE) {
1567 splx(s);
1752#if !defined(MAX_PERF)
1568 printf("biodone: buffer already done\n");
1754#endif
1569 return;
1570 }
1571 bp->b_flags |= B_DONE;
1572
1573 if ((bp->b_flags & B_READ) == 0) {
1574 vwakeup(bp);
1575 }
1576#ifdef BOUNCE_BUFFERS

--- 16 unchanged lines hidden ---

1593 int iosize;
1594 struct vnode *vp = bp->b_vp;
1595
1596 if (vp->v_type == VBLK)
1597 foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1598 else
1599 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1600 obj = vp->v_object;
1787#if !defined(MAX_PERF)
1601 if (!obj) {
1602 panic("biodone: no object");
1603 }
1791#endif
1604#if defined(VFS_BIO_DEBUG)
1605 if (obj->paging_in_progress < bp->b_npages) {
1606 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1607 obj->paging_in_progress, bp->b_npages);
1608 }
1609#endif
1610 iosize = bp->b_bufsize;
1611 for (i = 0; i < bp->b_npages; i++) {

--- 30 unchanged lines hidden ---

1642 }
1643
1644 /*
1645 * when debugging new filesystems or buffer I/O methods, this
1646 * is the most common error that pops up. if you see this, you
1647 * have not set the page busy flag correctly!!!
1648 */
1649 if (m->busy == 0) {
1838#if !defined(MAX_PERF)
1650 printf("biodone: page busy < 0, "
1651 "pindex: %d, foff: 0x(%x,%x), "
1652 "resid: %d, index: %d\n",
1653 (int) m->pindex, (int)(foff >> 32),
1654 (int) foff & 0xffffffff, resid, i);
1844#endif
1655 if (vp->v_type != VBLK)
1846#if !defined(MAX_PERF)
1656 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
1657 bp->b_vp->v_mount->mnt_stat.f_iosize,
1658 (int) bp->b_lblkno,
1659 bp->b_flags, bp->b_npages);
1660 else
1661 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
1662 (int) bp->b_lblkno,
1663 bp->b_flags, bp->b_npages);
1664 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
1665 m->valid, m->dirty, m->wire_count);
1857#endif
1666 panic("biodone: page busy < 0\n");
1667 }
1668 --m->busy;
1669 if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1670 m->flags &= ~PG_WANTED;
1671 wakeup(m);
1672 }
1673 --obj->paging_in_progress;

--- 84 unchanged lines hidden ---

1758
1759 foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1760
1761 for (i = 0; i < bp->b_npages; i++) {
1762 vm_page_t m = bp->b_pages[i];
1763
1764 if (m == bogus_page) {
1765 m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
1958#if !defined(MAX_PERF)
1766 if (!m) {
1767 panic("vfs_unbusy_pages: page missing\n");
1768 }
1962#endif
1769 bp->b_pages[i] = m;
1770 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1771 }
1772 --obj->paging_in_progress;
1773 --m->busy;
1774 if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1775 m->flags &= ~PG_WANTED;
1776 wakeup(m);

--- 233 unchanged lines hidden ---

2010
2011 from = round_page(from);
2012 to = round_page(to);
2013 index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
2014
2015 for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2016 p = bp->b_pages[index];
2017 if (p && (index < bp->b_npages)) {
2212#if !defined(MAX_PERF)
2018 if (p->busy) {
2019 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
2020 bp->b_blkno, bp->b_lblkno);
2021 }
2217#endif
2022 bp->b_pages[index] = NULL;
2023 pmap_kremove(pg);
2024 vm_page_unwire(p);
2025 vm_page_free(p);
2026 }
2027 }
2028 bp->b_npages = from >> PAGE_SHIFT;
2029}

--- 29 unchanged lines hidden ---