Side-by-side diff (compact view) of sys/vm/vm_page.c between FreeBSD revisions
44880 (old, $Id$ rev 1.127) and 45347 (new, $Id$ rev 1.128).
Deleted (old-revision) and Added (new-revision) lines are interleaved below,
each prefixed with its line number in the corresponding revision.
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
37 * $Id: vm_page.c,v 1.127 1999/02/24 21:26:26 dillon Exp $
37 * $Id: vm_page.c,v 1.128 1999/03/19 05:21:03 alc Exp $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *

--- 95 unchanged lines hidden (view full) ---

141vm_page_t vm_page_array = 0;
142static int vm_page_array_size = 0;
143long first_page = 0;
144static long last_page;
145static vm_size_t page_mask;
146static int page_shift;
147int vm_page_zero_count = 0;
148
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *

--- 95 unchanged lines hidden (view full) ---

141vm_page_t vm_page_array = 0;
142static int vm_page_array_size = 0;
143long first_page = 0;
144static long last_page;
145static vm_size_t page_mask;
146static int page_shift;
147int vm_page_zero_count = 0;
148
149/*
150 * map of contiguous valid DEV_BSIZE chunks in a page
151 * (this list is valid for page sizes upto 16*DEV_BSIZE)
152 */
153static u_short vm_page_dev_bsize_chunks[] = {
154 0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
155 0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
156};
157
158static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
159static void vm_page_free_wakeup __P((void));
160
161/*
162 * vm_set_page_size:
163 *
164 * Sets the page size, perhaps based upon the memory
165 * size. Must be called before any use of page-size

--- 1271 unchanged lines hidden (view full) ---

1437 return NULL;
1438 goto retrylookup;
1439 }
1440
1441 return m;
1442}
1443
1444/*
149static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex));
150static void vm_page_free_wakeup __P((void));
151
152/*
153 * vm_set_page_size:
154 *
155 * Sets the page size, perhaps based upon the memory
156 * size. Must be called before any use of page-size

--- 1271 unchanged lines hidden (view full) ---

1428 return NULL;
1429 goto retrylookup;
1430 }
1431
1432 return m;
1433}
1434
1435/*
1445 * mapping function for valid bits or for dirty bits in
1436 * Mapping function for valid bits or for dirty bits in
1446 * a page. May not block.
1437 * a page. May not block.
1438 *
1439 * Inputs are required to range within a page.
1447 */
1440 */
1441
1448__inline int
1449vm_page_bits(int base, int size)
1450{
1442__inline int
1443vm_page_bits(int base, int size)
1444{
1451 u_short chunk;
1445 int first_bit;
1446 int last_bit;
1452
1447
1453 if ((base == 0) && (size >= PAGE_SIZE))
1454 return VM_PAGE_BITS_ALL;
1448 KASSERT(
1449 base + size <= PAGE_SIZE,
1450 ("vm_page_bits: illegal base/size %d/%d", base, size)
1451 );
1455
1452
1456 size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1457 base &= PAGE_MASK;
1458 if (size > PAGE_SIZE - base) {
1459 size = PAGE_SIZE - base;
1460 }
1453 if (size == 0) /* handle degenerate case */
1454 return(0);
1461
1455
1462 base = base / DEV_BSIZE;
1463 chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
1464 return (chunk << base) & VM_PAGE_BITS_ALL;
1456 first_bit = base >> DEV_BSHIFT;
1457 last_bit = (base + size - 1) >> DEV_BSHIFT;
1458
1459 return ((2 << last_bit) - (1 << first_bit));
1465}
1466
1467/*
1468 * set a page valid and clean. May not block.
1460}
1461
1462/*
1463 * set a page valid and clean. May not block.
1464 *
1465 * In order to maintain consistancy due to the DEV_BSIZE granularity
1466 * of the valid bits, we have to zero non-DEV_BSIZE aligned portions of
1467 * the page at the beginning and end of the valid range when the
1468 * associated valid bits are not already set.
1469 *
1470 * (base + size) must be less then or equal to PAGE_SIZE.
1469 */
1470void
1471vm_page_set_validclean(m, base, size)
1472 vm_page_t m;
1473 int base;
1474 int size;
1475{
1471 */
1472void
1473vm_page_set_validclean(m, base, size)
1474 vm_page_t m;
1475 int base;
1476 int size;
1477{
1476 int pagebits = vm_page_bits(base, size);
1478 int pagebits;
1479 int frag;
1480 int endoff;
1481
1482 if (size == 0) /* handle degenerate case */
1483 return;
1484
1485 /*
1486 * If the base is not DEV_BSIZE aligned and the valid
1487 * bit is clear, we have to zero out a portion of the
1488 * first block.
1489 */
1490
1491 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
1492 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
1493 ) {
1494 pmap_zero_page_area(
1495 VM_PAGE_TO_PHYS(m),
1496 frag,
1497 base - frag
1498 );
1499 }
1500
1501 /*
1502 * If the ending offset is not DEV_BSIZE aligned and the
1503 * valid bit is clear, we have to zero out a portion of
1504 * the last block.
1505 */
1506
1507 endoff = base + size;
1508
1509 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
1510 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
1511 ) {
1512 pmap_zero_page_area(
1513 VM_PAGE_TO_PHYS(m),
1514 endoff,
1515 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
1516 );
1517 }
1518
1519 /*
1520 * Set valid, clear dirty bits. If validating the entire
1521 * page we can safely clear the pmap modify bit.
1522 */
1523
1524 pagebits = vm_page_bits(base, size);
1477 m->valid |= pagebits;
1478 m->dirty &= ~pagebits;
1525 m->valid |= pagebits;
1526 m->dirty &= ~pagebits;
1479 if( base == 0 && size == PAGE_SIZE)
1527
1528 if (base == 0 && size == PAGE_SIZE)
1480 pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1481}
1482
1483/*
1484 * set a page (partially) invalid. May not block.
1485 */
1486void
1487vm_page_set_invalid(m, base, size)

--- 5 unchanged lines hidden (view full) ---

1493
1494 m->valid &= ~(bits = vm_page_bits(base, size));
1495 if (m->valid == 0)
1496 m->dirty &= ~bits;
1497 m->object->generation++;
1498}
1499
1500/*
1529 pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1530}
1531
1532/*
1533 * set a page (partially) invalid. May not block.
1534 */
1535void
1536vm_page_set_invalid(m, base, size)

--- 5 unchanged lines hidden (view full) ---

1542
1543 m->valid &= ~(bits = vm_page_bits(base, size));
1544 if (m->valid == 0)
1545 m->dirty &= ~bits;
1546 m->object->generation++;
1547}
1548
1549/*
1501 * is (partial) page valid? May not block.
1550 * vm_page_zero_invalid()
1551 *
1552 * The kernel assumes that the invalid portions of a page contain
1553 * garbage, but such pages can be mapped into memory by user code.
1554 * When this occurs, we must zero out the non-valid portions of the
1555 * page so user code sees what it expects.
1556 *
1557 * Pages are most often semi-valid when the end of a file is mapped
1558 * into memory and the file's size is not page aligned.
1502 */
1559 */
1560
1561void
1562vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
1563{
1564 int b;
1565 int i;
1566
1567 /*
1568 * Scan the valid bits looking for invalid sections that
1569 * must be zerod. Invalid sub-DEV_BSIZE'd areas ( where the
1570 * valid bit may be set ) have already been zerod by
1571 * vm_page_set_validclean().
1572 */
1573
1574 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
1575 if (i == (PAGE_SIZE / DEV_BSIZE) ||
1576 (m->valid & (1 << i))
1577 ) {
1578 if (i > b) {
1579 pmap_zero_page_area(
1580 VM_PAGE_TO_PHYS(m),
1581 b << DEV_BSHIFT,
1582 (i - b) << DEV_BSHIFT
1583 );
1584 }
1585 b = i + 1;
1586 }
1587 }
1588
1589 /*
1590 * setvalid is TRUE when we can safely set the zero'd areas
1591 * as being valid. We can do this if there are no cache consistancy
1592 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS.
1593 */
1594
1595 if (setvalid)
1596 m->valid = VM_PAGE_BITS_ALL;
1597}
1598
1599/*
1600 * vm_page_is_valid:
1601 *
1602 * Is (partial) page valid? Note that the case where size == 0
1603 * will return FALSE in the degenerate case where the page is
1604 * entirely invalid, and TRUE otherwise.
1605 *
1606 * May not block.
1607 */
1608
1503int
1504vm_page_is_valid(m, base, size)
1505 vm_page_t m;
1506 int base;
1507 int size;
1508{
1509 int bits = vm_page_bits(base, size);
1510

--- 272 unchanged lines hidden ---
1609int
1610vm_page_is_valid(m, base, size)
1611 vm_page_t m;
1612 int base;
1613 int size;
1614{
1615 int bits = vm_page_bits(base, size);
1616

--- 272 unchanged lines hidden ---