--- vfs_bio.c (r76827)
+++ vfs_bio.c (r77085)
 /*
  * Copyright (c) 1994,1997 John S. Dyson
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice immediately at the beginning of the file, without modification,
  *    this list of conditions, and the following disclaimer.
  * 2. Absolutely no warranty of function or purpose is made by the author
  *    John S. Dyson.
  *
- * $FreeBSD: head/sys/kern/vfs_bio.c 76827 2001-05-19 01:28:09Z alfred $
+ * $FreeBSD: head/sys/kern/vfs_bio.c 77085 2001-05-23 22:24:49Z jhb $
  */

 /*
  * this file contains a new buffer I/O scheme implementing a coherent
  * VM object and buffer cache scheme. Pains have been taken to make
  * sure that the performance degradation associated with schemes such
  * as this is not realized.
  *

--- 418 unchanged lines hidden ---

 /*
  * bfreekva() - free the kva allocation for a buffer.
  *
  * Must be called at splbio() or higher as this is the only locking for
  * buffer_map.
  *
  * Since this call frees up buffer space, we call bufspacewakeup().
  *
- * Can be called with or without the vm_mtx.
+ * Must be called without the vm_mtx.
  */
 static void
 bfreekva(struct buf * bp)
 {

+        mtx_assert(&vm_mtx, MA_NOTOWNED);
         if (bp->b_kvasize) {
-                int hadvmlock;
-
                 ++buffreekvacnt;
                 bufspace -= bp->b_kvasize;
-                hadvmlock = mtx_owned(&vm_mtx);
-                if (!hadvmlock)
-                        mtx_lock(&vm_mtx);
+                mtx_lock(&vm_mtx);
                 vm_map_delete(buffer_map,
                     (vm_offset_t) bp->b_kvabase,
                     (vm_offset_t) bp->b_kvabase + bp->b_kvasize
                 );
-                if (!hadvmlock)
-                        mtx_unlock(&vm_mtx);
+                mtx_unlock(&vm_mtx);
                 bp->b_kvasize = 0;
                 bufspacewakeup();
         }
 }

 /*
  * bremfree:
  *

--- 853 unchanged lines hidden ---
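The bfreekva() hunk above is the template for the rest of this revision: the tolerant idiom (query mtx_owned(), lock only if the caller had not) gives way to a strict contract in which the caller must not hold vm_mtx and the function locks and unlocks unconditionally, with mtx_assert() enforcing the rule at entry. Below is a minimal userland sketch of the two idioms, using POSIX threads rather than the kernel's mtx(9) API; all names here (lock_acquire, free_kva_old, and so on) are invented for illustration and appear nowhere in vfs_bio.c.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Stand-ins for vm_mtx; the owner bookkeeping mimics what the kernel's
 * mtx_owned() reads out of the mutex itself.  Sketch only, not mtx(9). */
static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static bool owned;

static void
lock_acquire(void)
{
        pthread_mutex_lock(&vm_lock);
        owner = pthread_self();
        owned = true;
}

static void
lock_release(void)
{
        owned = false;
        pthread_mutex_unlock(&vm_lock);
}

static bool
lock_owned(void)
{
        /* Safe without the lock: 'owner' only equals us while we hold it. */
        return (owned && pthread_equal(owner, pthread_self()));
}

/* Old idiom: tolerate either caller state, lock only if needed. */
static void
free_kva_old(void)
{
        bool had_lock = lock_owned();

        if (!had_lock)
                lock_acquire();
        /* ... the vm_map_delete() work would happen here ... */
        if (!had_lock)
                lock_release();
}

/* New idiom: exactly one legal caller state, asserted at entry. */
static void
free_kva_new(void)
{
        assert(!lock_owned());          /* mtx_assert(&vm_mtx, MA_NOTOWNED) */
        lock_acquire();
        /* ... the vm_map_delete() work would happen here ... */
        lock_release();
}

int
main(void)
{
        free_kva_old();                 /* legal from any caller state */
        free_kva_new();                 /* legal only when unlocked */
        return (0);
}

The old form is convenient at call sites but hides the locking story from readers and from assertion checking; the new form turns it into an invariant that fails loudly.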

 /*
  * Must be called with vm_mtx held.
  */
 static void
 vfs_vmio_release(bp)
         struct buf *bp;
 {
-        int i, s;
+        int i;
         vm_page_t m;

-        s = splvm();
         mtx_assert(&vm_mtx, MA_OWNED);
         for (i = 0; i < bp->b_npages; i++) {
                 m = bp->b_pages[i];
                 bp->b_pages[i] = NULL;
                 /*
                  * In order to keep page LRU ordering consistent, put
                  * everything on the inactive queue.
                  */

--- 16 unchanged lines hidden ---

                                 vm_page_busy(m);
                                 vm_page_protect(m, VM_PROT_NONE);
                                 vm_page_free(m);
                         } else if (vm_page_count_severe()) {
                                 vm_page_try_to_cache(m);
                         }
                 }
         }
-        splx(s);
         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);

         /* could drop vm_mtx here */

         if (bp->b_bufsize) {
                 bufspacewakeup();
                 bp->b_bufsize = 0;
         }

--- 382 unchanged lines hidden ---
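In vfs_vmio_release() the contract runs the other way: the caller must already hold vm_mtx. The MA_OWNED assertion stays, while the splvm()/splx() pair (and the local s) disappears, since mutex exclusion now stands in for the old spl interrupt protection on this path. A hedged sketch of that caller-provides-the-lock shape, again with pthreads and invented names (release_pages, caller):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static bool owned;                      /* maintained at lock/unlock sites */

/* Callee: no locking of its own, only a contract check at entry. */
static void
release_pages(void)
{
        assert(owned);                  /* mtx_assert(&vm_mtx, MA_OWNED) */
        /* ... walk bp->b_pages[], unwire and free or cache each page ... */
}

/* Caller: one visible acquire/release brackets the call. */
static void
caller(void)
{
        pthread_mutex_lock(&vm_lock);
        owned = true;
        release_pages();
        owned = false;
        pthread_mutex_unlock(&vm_lock);
}

int
main(void)
{
        caller();
        return (0);
}

Note the division of labor: the callee only asserts, and every lock acquisition is visible in exactly one place, the caller.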

          * to keep fragmentation sane we only allocate kva in
          * BKVASIZE chunks.
          */
         maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;

         if (maxsize != bp->b_kvasize) {
                 vm_offset_t addr = 0;

-                /* we'll hold the lock over some vm ops */
-                mtx_lock(&vm_mtx);
                 bfreekva(bp);

+                mtx_lock(&vm_mtx);
                 if (vm_map_findspace(buffer_map,
                     vm_map_min(buffer_map), maxsize, &addr)) {
                         /*
                          * Uh oh. Buffer map is too fragmented. We
                          * must defragment the map.
                          */
                         mtx_unlock(&vm_mtx);
                         ++bufdefragcnt;

--- 216 unchanged lines hidden ---
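This hunk in the buffer-allocation path is a direct consequence of the new bfreekva() contract. Because bfreekva() now asserts that vm_mtx is not held and takes the lock itself, the caller may no longer lock first; the mtx_lock() therefore moves below the bfreekva() call. A sketch of the ordering constraint (invented names; the real code works on buffer_map with vm_map_delete()/vm_map_findspace()):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static bool owned;

static void
lock_acquire(void)
{
        pthread_mutex_lock(&vm_lock);
        owned = true;
}

static void
lock_release(void)
{
        owned = false;
        pthread_mutex_unlock(&vm_lock);
}

/* Models the new bfreekva(): refuses to run with the lock held. */
static void
bfreekva_sketch(void)
{
        assert(!owned);                 /* the MA_NOTOWNED assertion */
        lock_acquire();
        /* ... vm_map_delete() ... */
        lock_release();
}

/*
 * The removed ordering would abort inside bfreekva_sketch():
 *
 *      lock_acquire();
 *      bfreekva_sketch();      <- assert(!owned) fails
 *
 * so the lock acquisition has to follow the call, as below.
 */
static void
alloc_kva_new_order(void)
{
        bfreekva_sketch();              /* runs unlocked, locks internally */
        lock_acquire();
        /* ... vm_map_findspace() ... */
        lock_release();
}

int
main(void)
{
        alloc_kva_new_order();
        return (0);
}

The price is an extra unlock/lock round-trip on this path; the benefit is that each function keeps a single, checkable locking story.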

  * Sets the dirty range for a buffer based on the status of the dirty
  * bits in the pages comprising the buffer.
  *
  * The range is limited to the size of the buffer.
  *
  * This routine is primarily used by NFS, but is generalized for the
  * B_VMIO case.
  *
- * Can be called with or without vm_mtx
+ * Must be called with vm_mtx held.
  */
 static void
 vfs_setdirty(struct buf *bp)
 {
         int i;
-        int hadvmlock;
         vm_object_t object;

+        mtx_assert(&vm_mtx, MA_OWNED);
         /*
          * Degenerate case - empty buffer
          */

         if (bp->b_bufsize == 0)
                 return;

         /*
          * We qualify the scan for modified pages on whether the
          * object has been flushed yet. The OBJ_WRITEABLE flag
          * is not cleared simply by protecting pages off.
          */

         if ((bp->b_flags & B_VMIO) == 0)
                 return;

-        hadvmlock = mtx_owned(&vm_mtx);
-        if (!hadvmlock)
-                mtx_lock(&vm_mtx);
-
         object = bp->b_pages[0]->object;

         if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
                 printf("Warning: object %p writeable but not mightbedirty\n", object);
         if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
                 printf("Warning: object %p mightbedirty but not writeable\n", object);

         if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {

--- 41 unchanged lines hidden ---


                 if (boffset < eoffset) {
                         if (bp->b_dirtyoff > boffset)
                                 bp->b_dirtyoff = boffset;
                         if (bp->b_dirtyend < eoffset)
                                 bp->b_dirtyend = eoffset;
                 }
         }
-        if (!hadvmlock)
-                mtx_unlock(&vm_mtx);
 }

 /*
  * getblk:
  *
  * Get a block given a specified block and offset into a file/device.
  * The buffer's B_DONE bit will be cleared on return, making it almost
  * ready for an I/O initiation. B_INVAL may or may not be set on

--- 1088 unchanged lines hidden ---
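vfs_setdirty() gets the same treatment as vfs_vmio_release(): the hadvmlock save-and-restore disappears and the requirement moves to the caller, which must enter with vm_mtx held. One practical effect, sketched below with invented names, is that a single acquisition can now cover a whole sequence of VM operations instead of each helper conditionally re-locking on its own:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static bool owned;

static void
setdirty_sketch(void)
{
        assert(owned);                  /* mtx_assert(&vm_mtx, MA_OWNED) */
        /* ... scan pages, widen b_dirtyoff/b_dirtyend ... */
}

static void
other_vm_op_sketch(void)
{
        assert(owned);                  /* another helper under the same rule */
}

int
main(void)
{
        /* One acquisition covers the whole sequence; under the old
         * "with or without" scheme each helper re-checked and possibly
         * re-locked for itself. */
        pthread_mutex_lock(&vm_lock);
        owned = true;
        setdirty_sketch();
        other_vm_op_sketch();
        owned = false;
        pthread_mutex_unlock(&vm_lock);
        return (0);
}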

                 bp->b_resid = 0;
                 mtx_unlock(&vm_mtx);
         } else {
                 clrbuf(bp);
         }
 }

 /*
- * vm_hold_load_pages and vm_hold_unload pages get pages into
+ * vm_hold_load_pages and vm_hold_free_pages get pages into
  * a buffer's address space. The pages are anonymous and are
  * not associated with a file object.
  *
  * vm_mtx should not be held
  */
 static void
 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 {

--- 36 unchanged lines hidden ---

 }

 void
 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 {
         vm_offset_t pg;
         vm_page_t p;
         int index, newnpages;
-        int hadvmlock;

+        mtx_assert(&vm_mtx, MA_NOTOWNED);
         from = round_page(from);
         to = round_page(to);
         newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

-        hadvmlock = mtx_owned(&vm_mtx);
-        if (!hadvmlock)
-                mtx_lock(&vm_mtx);
+        mtx_lock(&vm_mtx);
         for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
                 p = bp->b_pages[index];
                 if (p && (index < bp->b_npages)) {
                         if (p->busy) {
                                 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
                                     bp->b_blkno, bp->b_lblkno);
                         }
                         bp->b_pages[index] = NULL;
                         pmap_kremove(pg);
                         vm_page_busy(p);
                         vm_page_unwire(p, 0);
                         vm_page_free(p);
                 }
         }
         bp->b_npages = newnpages;
-        if (!hadvmlock)
-                mtx_unlock(&vm_mtx);
+        mtx_unlock(&vm_mtx);
 }


 #include "opt_ddb.h"
 #ifdef DDB
 #include <ddb/ddb.h>

 DB_SHOW_COMMAND(buffer, db_show_buffer)

--- 31 unchanged lines hidden ---
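vm_hold_free_pages() rounds out the pattern: an MA_NOTOWNED assertion at entry plus one unconditional lock/unlock pair replaces the conditional hadvmlock logic, and the stale mention of vm_hold_unload in the shared comment becomes vm_hold_free_pages. The payoff of asserting rather than tolerating is fail-fast behavior: a caller that violates the contract aborts at the offending call site instead of recursing on or deadlocking against the lock later. A final sketch (pthreads, invented names) showing both a legal call and one that trips the assertion:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static bool owned;

static void
hold_free_pages_sketch(void)
{
        assert(!owned);                 /* mtx_assert(&vm_mtx, MA_NOTOWNED) */
        pthread_mutex_lock(&vm_lock);
        owned = true;
        /* ... the pmap_kremove()/vm_page_free() loop would run here ... */
        owned = false;
        pthread_mutex_unlock(&vm_lock);
}

int
main(void)
{
        hold_free_pages_sketch();       /* fine: lock not held */

        pthread_mutex_lock(&vm_lock);
        owned = true;
        hold_free_pages_sketch();       /* aborts here, at the buggy call site */
        return (0);
}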