vfs_bio.c revision 12110
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.68 1995/10/29 15:31:13 phk Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

/*
 * System initialization
 */

static void vfs_update __P((void));
struct	proc *updateproc;

static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)


struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static __inline struct buf * gbincore(struct vnode * vp, daddr_t blkno);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

int bufspace, maxbufspace;

/*
 * advisory minimum for size of LRU queue or VMIO queue
 */
int minbuf;

struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES];

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	minbuf = nbuf / 3;
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);

}
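
/*
 * A worked example of the sizing above (a sketch with assumed values,
 * not taken from any particular configuration): with nbuf == 1024 and
 * PAGE_SIZE == 4096, bufinit() sets
 *
 *	maxbufspace = 2 * (1024 + 8) * 4096 = 8454144 bytes
 *
 * i.e. roughly 8K of buffer space per buffer header, matching the 8K
 * filesystem-block assumption described in the comment above.
 */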

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
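
/*
 * A hedged usage sketch, not part of the original file: the canonical
 * bread()/brelse() consumer pattern.  The function name and the
 * VFS_BIO_EXAMPLE guard are hypothetical; the calls are the interfaces
 * defined in this file.  Note that bread() hands the buffer back in
 * *bpp even when biowait() reports an error, so the caller releases it
 * on both paths.
 */
#ifdef VFS_BIO_EXAMPLE
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp;
	int error;

	error = bread(vp, blkno, size, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... consume bp->b_data here ... */
	brelse(bp);
	return (0);
}
#endif /* VFS_BIO_EXAMPLE */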

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}
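
/*
 * A hedged usage sketch (hypothetical caller, same VFS_BIO_EXAMPLE
 * guard as above): breadn() as a filesystem might call it, reading one
 * block synchronously while the next logical block is pushed through
 * the asynchronous read-ahead path above.  The "+ 1" read-ahead block
 * is an assumption for illustration.
 */
#ifdef VFS_BIO_EXAMPLE
static int
example_read_with_readahead(struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp;
	daddr_t rablkno = blkno + 1;
	int rabsize = size;
	int error;

	error = breadn(vp, blkno, size, &rablkno, &rabsize, 1, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... consume bp->b_data here ... */
	brelse(bp);
	return (0);
}
#endif /* VFS_BIO_EXAMPLE */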

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup(&needsbuffer);
	}

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate* the
	 * buffer, but the VM object is kept around.  The B_NOCACHE flag is
	 * used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");
		if (!vp->v_mount)
			panic("brelse: missing mount info");

		if (bp->b_npages) {
			obj = (vm_object_t) vp->v_object;
			foff = trunc_page(vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, foff);
					if (!m) {
						panic("brelse: page missing");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
				resid = (m->offset + PAGE_SIZE) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					if (m->flags & PG_WANTED) {
						wakeup(m);
						m->flags &= ~PG_WANTED;
					}
					if ((m->busy == 0) && ((m->flags & PG_BUSY) == 0)) {
						vm_page_test_dirty(m);
						/*
						 * if page isn't valid, no sense in keeping it around
						 */
						if (m->valid == 0) {
							vm_page_protect(m, VM_PROT_NONE);
							vm_page_free(m);
						/*
						 * if page isn't dirty and hasn't been referenced by
						 * a process, then cache it
						 */
						} else if ((m->dirty & m->valid) == 0 &&
						    (m->flags & PG_REFERENCED) == 0 &&
						    !pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
							vm_page_cache(m);
						/*
						 * otherwise activate it
						 */
						} else if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Check to see if a block is currently resident in a buffer.  This is
 * the internal flavor of incore() below: the caller is assumed to be
 * at splbio() already, and B_INVAL buffers are returned as well.
 */
static __inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
void
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;

	s = splbio();
	if (vp->v_mount && (vp->v_flag & VVMIO) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size = vp->v_mount->mnt_stat.f_iosize;
		int maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			bremfree(bp);
			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
			splx(s);
			return;
		}
	}
	/*
	 * default (old) behavior, writing out only one block
	 */
	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;
	(void) VOP_BWRITE(bp);
	splx(s);
}
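
/*
 * A worked example of the contiguity test above (assumed values): with
 * an 8192-byte filesystem block size and DEV_BSIZE == 512, the buffer
 * at lblkno + i can join the cluster only if its disk address is
 * exactly bp->b_blkno + (i * 8192) / 512 == bp->b_blkno + 16 * i; that
 * is, the candidate buffers must be physically back-to-back on the
 * device, and any unmapped buffer (b_blkno == b_lblkno) ends the run.
 */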


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_RELBUF | B_BUSY | B_DONE;
		brelse(bp);
		bremfree(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
fillbuf:
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	if (bufspace >= maxbufspace) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = i * PAGE_SIZE;
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1) * PAGE_SIZE;
		offset = boffset + bp->b_pages[0]->offset;
		if (offset >= object->size) {
			boffset = object->size - bp->b_pages[0]->offset;
		}
		if (bp->b_dirtyend < boffset) {
			bp->b_dirtyend = boffset;
		}
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno)) != NULL) {
		if (bp->b_flags & (B_BUSY|B_INVAL)) {
			bp->b_flags |= B_WANTED;
			if (!tsleep(bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen, but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */
		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}
		/*
		 * make sure that all pages in the buffer are valid, if they
		 * aren't, clear the cache flag.
		 * ASSUMPTION:
		 *  if the buffer is greater than 1 page in size, it is assumed
		 *  that the buffer address starts on a page boundary...
		 */
		if (bp->b_flags & B_VMIO) {
			int szleft, i;

			szleft = size;
			for (i = 0; i < bp->b_npages; i++) {
				if (szleft > PAGE_SIZE) {
					if ((bp->b_pages[i]->valid & VM_PAGE_BITS_ALL) !=
						VM_PAGE_BITS_ALL) {
						bp->b_flags &= ~(B_CACHE|B_DONE);
						break;
					}
					szleft -= PAGE_SIZE;
				} else {
					if (!vm_page_is_valid(bp->b_pages[i],
						(((vm_offset_t) bp->b_data) & PAGE_MASK),
						szleft)) {
						bp->b_flags &= ~(B_CACHE|B_DONE);
						break;
					}
					szleft = 0;
				}
			}
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo)
				return NULL;
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
		return (bp);
	}
}
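
/*
 * A hedged usage sketch (hypothetical, VFS_BIO_EXAMPLE guard as
 * before): getblk() as used when the caller intends to overwrite the
 * whole block, so no read is needed and the non-valid portions are
 * simply zeroed via vfs_bio_clrbuf() (defined later in this file).
 */
#ifdef VFS_BIO_EXAMPLE
static struct buf *
example_get_zeroed_block(struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);	/* 0, 0: sleep indefinitely */
	if (bp != NULL)
		vfs_bio_clrbuf(bp);
	return (bp);
}
#endif /* VFS_BIO_EXAMPLE */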

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
		doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								PAGE_WAKEUP(bp->b_pages[j]);
							}
							VM_WAIT;
							goto doretry;
						}
						vm_page_activate(m);
						m->act_count = 0;
						m->valid = 0;
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						int j;

						for (j = bp->b_npages; j < pageindex; j++) {
							PAGE_WAKEUP(bp->b_pages[j]);
						}

						s = splbio();
						m->flags |= PG_WANTED;
						tsleep(m, PRIBIO, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
							(m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				for (i = bp->b_npages; i < curbpnpages; i++) {
					m = bp->b_pages[i];
					m->bmapped++;
					PAGE_WAKEUP(m);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off % PAGE_SIZE;
			}
		}
	}
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_offset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;

			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, foff);
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (trunc_page(foff) != m->offset) {
				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
			}
#endif
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m, foff & (PAGE_SIZE-1), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "off: %ld, foff: %ld, "
				    "resid: %d, index: %d\n",
				    m->offset, foff, resid, i);
				printf(" iosize: %ld, lblkno: %ld, flags: 0x%x, npages: %d\n",
				    bp->b_vp->v_mount->mnt_stat.f_iosize,
				    bp->b_lblkno, bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}
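
/*
 * A hedged sketch (hypothetical, VFS_BIO_EXAMPLE guard as before) of
 * the B_CALL hook that biodone() honors above: the caller points
 * b_iodone at a callback and sets B_CALL | B_ASYNC before handing the
 * buffer to the driver; biodone() then runs the callback in place of
 * its default wakeup/brelse handling.  The preamble here abbreviates
 * the bwrite() setup earlier in this file.
 */
#ifdef VFS_BIO_EXAMPLE
static void
example_iodone(struct buf *bp)
{
	/* called from biodone(), possibly at interrupt level: no blocking */
	bp->b_flags &= ~B_ASYNC;
	brelse(bp);
}

static void
example_async_write(struct buf *bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_CALL | B_ASYNC;
	bp->b_iodone = example_iodone;
	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp);
}
#endif /* VFS_BIO_EXAMPLE */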

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep(&vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_offset_t foff;

		foff = trunc_page(vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno);

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff + i * PAGE_SIZE);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			if (clear_modify) {
				vm_page_protect(m, VM_PROT_READ);
				vm_page_set_validclean(m,
					foff & (PAGE_SIZE-1), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff =
			bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
					foff & (PAGE_SIZE-1), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int j;

			if (bp->b_pages[0]->valid != VM_PAGE_BITS_ALL) {
				for (j = 0; j < bp->b_bufsize / DEV_BSIZE; j++) {
					bzero(bp->b_data + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				bzero(bp->b_data + i * PAGE_SIZE, PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + i * PAGE_SIZE + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages() and vm_hold_free_pages() get pages into and out
 * of a buffer's address space.  The pages are anonymous and are not
 * associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}