/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.12 1995/03/04 03:24:28 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>

#ifdef DEBUG
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
    daddr_t, daddr_t, long, int, long));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

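/* Statistics: clustered reads issued and total blocks transferred by them. */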
int totreads;
int totreadblocks;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 treats a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif
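
/*
 * Read-ahead state kept in the vnode (as used by cluster_read() below):
 *	v_lastr	- last logical block read, tested by ISSEQREAD above
 *	v_ralen	- current read-ahead window size, in filesystem blocks
 *	v_maxra	- highest logical block for which read-ahead has been issued
 */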

/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
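/*
 * A typical call, sketched after the way an FFS-style read path might use
 * this routine (illustrative only; the inode size and block numbers here
 * are assumptions, not taken from this file):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = cluster_read(vp, (u_quad_t)ip->i_size, lbn, fs_bsize,
 *	    NOCRED, &bp);
 *	if (error == 0)
 *		...copy data out of bp->b_data, then brelse(bp)...
 */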
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;

	origlblkno = lblkno;
	error = 0;
	/*
	 * get the requested block
	 */
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!ISSEQREAD(vp, origlblkno)) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= 1;
			return 0;
		} else if (vp->v_maxra >= origlblkno) {
			if ((vp->v_ralen + 1) < (MAXPHYS / size))
				vp->v_ralen++;
			if (vp->v_maxra >= (origlblkno + vp->v_ralen))
				return 0;
			lblkno = vp->v_maxra;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	}
	/*
	 * if ralen is "none", then try a little
	 */
	if (vp->v_ralen == 0)
		vp->v_ralen = 1;
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (ISSEQREAD(vp, origlblkno)) {
		int i;

		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= 1;
					alreadyincore = 1;
				} else {
					if (inmem(vp, rablkno)) {
						if (vp->v_maxra < rablkno)
							vp->v_maxra = rablkno + 1;
						continue;
					}
					if ((vp->v_ralen + 1) < MAXPHYS / size)
						vp->v_ralen++;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		if ((vp->v_ralen + 1) < MAXPHYS / size)
			vp->v_ralen++;
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize,
			    NULL, rablkno, blkno, size, num_ra, B_READ | B_ASYNC);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * if the synchronous read is a cluster, handle it, otherwise do a
	 * simple, non-clustered read.
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return (bp);
	}
	tbp = bp;
	if (!tbp) {
		tbp = getblk(vp, lbn, size, 0, 0);
	}
	if (tbp->b_flags & B_CACHE) {
		return (tbp);
	} else if (bp == NULL) {
		tbp->b_flags |= B_ASYNC;
	}
	bp = getpbuf();
	bp->b_flags = flags | B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	b_save = malloc(sizeof(struct buf *) * (run + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

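	/*
	 * Gather the component buffers, one per logical block, appending
	 * each buffer's VM pages to the pseudo-buffer so that the whole
	 * cluster can be read with a single strategy call.
	 */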
	inc = btodb(size);
	for (bn = blkno, i = 0; i <= run; ++i, bn += inc) {
		if (i != 0) {
			tbp = getblk(vp, lbn + i, size, 0, 0);
			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO)) {
				brelse(tbp);
				break;
			}
			tbp->b_blkno = bn;
			tbp->b_flags |= flags | B_READ | B_ASYNC;
		} else {
			tbp->b_flags |= flags | B_READ;
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j++) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}
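	/* Map the collected pages contiguously into the pbuf's KVA. */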
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove((vm_offset_t) bp->b_data, bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
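	/*
	 * All children are done; release the bookkeeping structure and
	 * the pseudo-buffer itself.
	 */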
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 */
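/*
 * Write-cluster state kept in the vnode (as maintained below):
 *	v_cstart - logical block number where the current cluster starts
 *	v_lastw	 - last logical block written
 *	v_lasta	 - disk (physical) block of the last write
 *	v_clen	 - maximum number of blocks, beyond v_cstart, in the cluster
 */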
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum cluster
			 * size, then push the previous cluster.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			cluster_wbuild(vp, NULL, lblocksize,
			    vp->v_cstart, cursize, lbn);
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp, *pb;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
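	/*
	 * Find a starting buffer for the cluster: in the usual case, scan
	 * past any leading blocks that cannot be clustered (not in core,
	 * busy, or the block on which I/O is currently being performed);
	 * otherwise use the caller-supplied last_bp directly.
	 */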
redo:
	if (lbn != -1 || last_bp == NULL) {
		while ((!(tbp = incore(vp, start_lbn)) || (tbp->b_flags & B_BUSY)
			|| (start_lbn == lbn)) && len) {
			++start_lbn;
			--len;
		}

		pb = trypbuf();
		/* Get more memory for current buffer */
		if (len <= 1 || pb == NULL) {
			if (pb != NULL)
				relpbuf(pb);
			if (last_bp) {
				bawrite(last_bp);
			} else if (len) {
				bp = getblk(vp, start_lbn, size, 0, 0);
				bawrite(bp);
			}
			return;
		}
		tbp = getblk(vp, start_lbn, size, 0, 0);
	} else {
		tbp = last_bp;
		if (tbp->b_flags & B_BUSY) {
			printf("vfs_cluster: warning: buffer already busy\n");
		}
		tbp->b_flags |= B_BUSY;
		last_bp = NULL;
		pb = trypbuf();
		if (pb == NULL) {
			bawrite(tbp);
			return;
		}
	}

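	/*
	 * Only delayed-write buffers belong in a write cluster; release
	 * anything else and restart the scan at the next block.
	 */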
	if (!(tbp->b_flags & B_DELWRI)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (tbp->b_bcount != tbp->b_bufsize) {
		relpbuf(pb);
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = pb;
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			if ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				if (tbp->b_flags & B_BUSY)
					break;
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
		for (j = 0; j < tbp->b_npages; j++) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
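	/* Map the collected pages into the pbuf and start one big write. */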
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed in by the caller (last_bp).
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}