Deleted: swap_pager.c (12904)   [lines marked "-" below]
Added:   swap_pager.c (13490)   [lines marked "+" below]
1/*
2 * Copyright (c) 1994 John S. Dyson
3 * Copyright (c) 1990 University of Utah.
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer

--- 25 unchanged lines hidden (view full) ---

34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
40 *
41 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
-  42 * $Id: swap_pager.c,v 1.57 1995/12/14 09:54:52 phk Exp $
+  42 * $Id: swap_pager.c,v 1.58 1995/12/17 07:19:55 bde Exp $
43 */
44
45/*
46 * Quick hack to page to dedicated partition(s).
47 * TODO:
48 * Add multiprocessor locks
49 * Deal with async writes in a better fashion
50 */

--- 246 unchanged lines hidden (view full) ---

297 /*
298 * XXX - there is a race condition here. Two processes
299 * can request the same named object simultaneously,
300 * and if one blocks for memory, the result is a disaster.
301 * Probably quite rare, but is yet another reason to just
302 * rip support of "named anonymous regions" out altogether.
303 */
304 object = vm_object_allocate(OBJT_SWAP,
- 305			OFF_TO_IDX(offset+ PAGE_SIZE - 1 + size));
+ 305			OFF_TO_IDX(offset + PAGE_SIZE - 1) + size);
306 object->handle = handle;
307 (void) swap_pager_swp_alloc(object, M_WAITOK);
308 }
309 } else {
310 object = vm_object_allocate(OBJT_SWAP,
- 311			OFF_TO_IDX(offset + PAGE_SIZE - 1 + size));
+ 311			OFF_TO_IDX(offset + PAGE_SIZE - 1) + size);
312 (void) swap_pager_swp_alloc(object, M_WAITOK);
313 }
314
315 return (object);
316}
317
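
The two changed lines above (line 305 and line 311) alter the page-count argument passed to vm_object_allocate(): the deleted form folds size into the byte offset before OFF_TO_IDX() converts it to a page index, while the added form converts only the rounded-up offset and then adds size to the resulting page index. The sketch below shows the arithmetic difference only; it assumes OFF_TO_IDX() is the usual right shift by PAGE_SHIFT and uses made-up values for offset and size, and none of it is taken from swap_pager.c.

#include <stdio.h>

/* Illustrative constants; assumption: 4 KB pages, OFF_TO_IDX() is a plain shift. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((unsigned long)(off) >> PAGE_SHIFT)

int main(void)
{
        unsigned long offset = 3 * PAGE_SIZE + 100;     /* hypothetical byte offset */
        unsigned long size = 5;                         /* hypothetical size value */

        /* Deleted form: size is folded into the byte offset before the shift,
         * so it is scaled down along with the offset. */
        unsigned long old_pages = OFF_TO_IDX(offset + PAGE_SIZE - 1 + size);

        /* Added form: only the rounded-up offset is converted to a page index;
         * size is then added to that index unscaled. */
        unsigned long new_pages = OFF_TO_IDX(offset + PAGE_SIZE - 1) + size;

        /* With the values above this prints "old: 4 pages, new: 9 pages". */
        printf("old: %lu pages, new: %lu pages\n", old_pages, new_pages);
        return 0;
}
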
318/*
319 * returns disk block associated with pager and offset

--- 941 unchanged lines hidden (view full) ---

1261 /*
1262 * get a swap pager clean data structure, block until we get it
1263 */
1264 if (swap_pager_free.tqh_first == NULL ||
1265 swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
1266 swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
1267 s = splbio();
1268 if (curproc == pageproc) {
- 1269			/*
- 1270			 * pageout daemon needs a swap control block
- 1271			 */
- 1272			swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT|SWAP_FREE_NEEDED;
- 1273			/*
- 1274			 * if it does not get one within a short time, then
- 1275			 * there is a potential deadlock, so we go-on trying
- 1276			 * to free pages.
- 1277			 */
- 1278			tsleep(&swap_pager_free, PVM, "swpfre", hz/10);
- 1279			swap_pager_sync();
- 1280			if (swap_pager_free.tqh_first == NULL ||
- 1281			    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
- 1282			    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
- 1283				splx(s);
- 1284				return VM_PAGER_AGAIN;
- 1285			}
- 1286		} else
- 1287			pagedaemon_wakeup();
- 1288		while (swap_pager_free.tqh_first == NULL ||
- 1289		    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
- 1290		    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
- 1291			if (curproc == pageproc) {
- 1292				swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT;
- 1293				if((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_reserved)
- 1294					wakeup(&cnt.v_free_count);
- 1295			}
- 1296
- 1297			swap_pager_needflags |= SWAP_FREE_NEEDED;
- 1298			tsleep(&swap_pager_free, PVM, "swpfre", 0);
- 1299			if (curproc == pageproc)
- 1300				swap_pager_sync();
- 1301			else
- 1302				pagedaemon_wakeup();
- 1303		}
- 1304		splx(s);
- 1305	}
+ 1269	retryfree:
+ 1270			/*
+ 1271			 * pageout daemon needs a swap control block
+ 1272			 */
+ 1273			swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT|SWAP_FREE_NEEDED;
+ 1274			/*
+ 1275			 * if it does not get one within a short time, then
+ 1276			 * there is a potential deadlock, so we go-on trying
+ 1277			 * to free pages. It is important to block here as opposed
+ 1278			 * to returning, thereby allowing the pageout daemon to continue.
+ 1279			 * It is likely that pageout daemon will start suboptimally
+ 1280			 * reclaiming vnode backed pages if we don't block. Since the
+ 1281			 * I/O subsystem is probably already fully utilized, might as
+ 1282			 * well wait.
+ 1283			 */
+ 1284			if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) {
+ 1285				swap_pager_sync();
+ 1286				if (swap_pager_free.tqh_first == NULL ||
+ 1287				    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
+ 1288				    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+ 1289					splx(s);
+ 1290					return VM_PAGER_AGAIN;
+ 1291				}
+ 1292			} else {
+ 1293				/*
+ 1294				 * we make sure that pageouts aren't taking up all of
+ 1295				 * the free swap control blocks.
+ 1296				 */
+ 1297				swap_pager_sync();
+ 1298				if (swap_pager_free.tqh_first == NULL ||
+ 1299				    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
+ 1300				    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+ 1301					goto retryfree;
+ 1302				}
+ 1303			}
+ 1304		} else {
+ 1305			pagedaemon_wakeup();
+ 1306			while (swap_pager_free.tqh_first == NULL ||
+ 1307			    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
+ 1308			    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+ 1309				swap_pager_needflags |= SWAP_FREE_NEEDED;
+ 1310				tsleep(&swap_pager_free, PVM, "swpfre", 0);
+ 1311				pagedaemon_wakeup();
+ 1312			}
+ 1313		}
+ 1314		splx(s);
+ 1315	}
1306 spc = swap_pager_free.tqh_first;
1307 TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
1308
1309 kva = spc->spc_kva;
1310
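
The rewritten block above changes how a caller waits when fewer than three swap pager control blocks are free. The deleted code had the pageout daemon sleep once for hz/10 ticks and return VM_PAGER_AGAIN if the shortage persisted; the added code sleeps for hz/5 ticks and distinguishes the two ways tsleep() can return: a non-zero value (timeout expired) still gives up with VM_PAGER_AGAIN after a final swap_pager_sync(), while a zero return (an explicit wakeup) retries from the retryfree: label instead of returning. A rough user-space sketch of that control flow follows; resource_available(), wait_for_resource() and try_reclaim() are hypothetical helpers standing in for the free-list check, tsleep() and swap_pager_sync(), and the code illustrates only the pattern, not the kernel implementation.

#include <stdbool.h>

/* Hypothetical stand-ins for the kernel primitives used above. */
bool resource_available(void);   /* "at least three spc entries on the free list" */
int  wait_for_resource(int ms);  /* 0 = woken up, non-zero = timed out (tsleep-like) */
void try_reclaim(void);          /* stands in for swap_pager_sync() */

enum result { OK, AGAIN };

/* Sketch of the added retry logic: a timed-out wait that still finds no free
 * block gives up with AGAIN, but an ordinary wakeup that finds the shortage
 * unresolved loops back and waits again instead of returning. */
static enum result get_control_block(void)
{
        if (resource_available())
                return OK;
retryfree:
        if (wait_for_resource(200) != 0) {      /* timeout expired */
                try_reclaim();
                if (!resource_available())
                        return AGAIN;           /* caller must try again later */
        } else {                                /* woken by a release */
                try_reclaim();
                if (!resource_available())
                        goto retryfree;         /* keep waiting */
        }
        return OK;                              /* proceed with a free block */
}
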

--- 120 unchanged lines hidden (view full) ---

1431 for (i = 0; i < count; i++) {
1432 if (rtvals[i] == VM_PAGER_OK) {
1433 pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
1434 m[i]->dirty = 0;
1435 /*
1436 * optimization, if a page has been read
1437 * during the pageout process, we activate it.
1438 */
- 1439			if ((m[i]->flags & PG_ACTIVE) == 0 &&
+ 1449			if ((m[i]->queue != PQ_ACTIVE) &&
1440 ((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
1441 pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) {
1442 vm_page_activate(m[i]);
1443 }
1444 }
1445 }
1446 } else {
1447 for (i = 0; i < count; i++) {

--- 89 unchanged lines hidden (view full) ---
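
Both activation hunks (old line 1439 / new line 1449 above, and old 1545 / new 1555 further down) replace a test of the PG_ACTIVE flag bit with a comparison of the page's queue field against PQ_ACTIVE: the queue a page sits on is now recorded in vm_page->queue rather than in the flag bits, while PG_WANTED and PG_REFERENCED remain flags. Below is a stripped-down sketch of the new test around vm_page_activate(); the struct, enum values and helper prototypes are hypothetical stand-ins used only for illustration, not the real vm_page.h definitions.

/* Hypothetical, simplified stand-ins for the real VM definitions. */
enum page_queue { PQ_NONE, PQ_FREE, PQ_INACTIVE, PQ_ACTIVE, PQ_CACHE };

struct vm_page {
        int             flags;          /* PG_WANTED, PG_REFERENCED, ... */
        enum page_queue queue;          /* which paging queue the page is on */
};

#define PG_WANTED       0x01
#define PG_REFERENCED   0x02

int  page_was_referenced(struct vm_page *m);    /* stands in for pmap_is_referenced() */
void page_activate(struct vm_page *m);          /* stands in for vm_page_activate() */

/* After a successful pageout, promote the page to the active queue if it was
 * touched while the I/O was in flight; the test now inspects the queue field
 * instead of a PG_ACTIVE flag bit. */
void maybe_activate(struct vm_page *m)
{
        if (m->queue != PQ_ACTIVE &&
            ((m->flags & (PG_WANTED | PG_REFERENCED)) || page_was_referenced(m)))
                page_activate(m);
}
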

1537 for (i = 0; i < spc->spc_count; i++) {
1538 printf("swap_pager_finish: I/O error, clean of page %lx failed\n",
1539 (u_long) VM_PAGE_TO_PHYS(spc->spc_m[i]));
1540 }
1541 } else {
1542 for (i = 0; i < spc->spc_count; i++) {
1543 pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i]));
1544 spc->spc_m[i]->dirty = 0;
- 1545			if ((spc->spc_m[i]->flags & PG_ACTIVE) == 0 &&
+ 1555			if ((spc->spc_m[i]->queue != PQ_ACTIVE) &&
1546 ((spc->spc_m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i]))))
1547 vm_page_activate(spc->spc_m[i]);
1548 }
1549 }
1550
1551
1552 for (i = 0; i < spc->spc_count; i++) {
1553 /*

--- 67 unchanged lines hidden ---