Side-by-side diff (deleted vs. added, full/compact view) of
sys/powerpc/aim/mmu_oea.c between revisions 116328 and 116355.
1/*
2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 77 unchanged lines hidden (view full) ---

86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93#include <sys/cdefs.h>
1/*
2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 77 unchanged lines hidden (view full) ---

86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93#include <sys/cdefs.h>
94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 116328 2003-06-14 06:20:25Z alc $");
94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 116355 2003-06-14 23:23:55Z alc $");
95
96/*
97 * Manages physical address maps.
98 *
99 * In addition to hardware address maps, this module is called upon to
100 * provide software-use-only maps which may or may not be stored in the
101 * same form as hardware maps. These pseudo-maps are used to store
102 * intermediate results from copy operations to and from address spaces.

--- 1448 unchanged lines hidden (view full) ---

1551pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1552{
1553
1554 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1555 ("pmap_remove_pages: non current pmap"));
1556 pmap_remove(pm, sva, eva);
1557}
1558
95
96/*
97 * Manages physical address maps.
98 *
99 * In addition to hardware address maps, this module is called upon to
100 * provide software-use-only maps which may or may not be stored in the
101 * same form as hardware maps. These pseudo-maps are used to store
102 * intermediate results from copy operations to and from address spaces.

--- 1448 unchanged lines hidden (view full) ---

1551pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1552{
1553
1554 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1555 ("pmap_remove_pages: non current pmap"));
1556 pmap_remove(pm, sva, eva);
1557}
1558
1559#ifndef KSTACK_MAX_PAGES
1560#define KSTACK_MAX_PAGES 32
1561#endif
1562
1563/*
/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(struct thread *td, int pages)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	u_int i;

	/*
	 * Bounds check: a request of 0 or 1 pages means "use the
	 * default"; anything above the compile-time maximum is clamped.
	 */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	/*
	 * Allocate object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;

	/*
	 * Get a kernel virtual address for the kstack for this thread.
	 * The region includes the guard pages; allocation failure is
	 * fatal since a thread cannot exist without a stack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("pmap_new_thread: kstack allocation failed");
	/*
	 * NOTE(review): TLBIE on the new base address — presumably to
	 * flush any stale translation for the reused VA range; confirm
	 * against the OEA MMU requirements.
	 */
	TLBIE(ks);
	/* Skip past the guard pages; td_kstack points at usable stack. */
	ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	td->td_kstack = ks;

	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;

	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.  VM_ALLOC_RETRY means the
		 * grab sleeps rather than failing; the page comes back
		 * busy and wired.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		/* Clear the busy state set by vm_page_grab(). */
		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		/* Stack contents need no pre-zeroing; mark fully valid. */
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}

	/*
	 * Enter the page into the kernel address space
	 */
	pmap_qenter(ks, ma, pages);
}
1626
1627void
1628pmap_dispose_thread(struct thread *td)
1629{
1630 vm_object_t ksobj;
1631 vm_offset_t ks;
1632 vm_page_t m;
1633 int i;
1634 int pages;
1635
1636 pages = td->td_kstack_pages;
1637 ksobj = td->td_kstack_obj;
1638 ks = td->td_kstack;
1639 for (i = 0; i < pages ; i++) {
1640 m = vm_page_lookup(ksobj, i);
1641 if (m == NULL)
1642 panic("pmap_dispose_thread: kstack already missing?");
1643 vm_page_lock_queues();
1644 vm_page_busy(m);
1645 vm_page_unwire(m, 0);
1646 vm_page_free(m);
1647 vm_page_unlock_queues();
1648 }
1649 pmap_qremove(ks, pages);
1650 kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
1651 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
1652 vm_object_deallocate(ksobj);
1653}
1654
/*
 * Bring a swapped-out kernel stack back into memory: fault in any
 * stack pages whose contents are no longer resident, wire them, and
 * re-enter the mappings into the kernel address space.
 */
void
pmap_swapin_thread(struct thread *td)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int rv;
	int i;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < pages; i++) {
		/* VM_ALLOC_RETRY: sleep rather than fail; page returns busy. */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			/* Contents were paged out; read them back in. */
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_thread: cannot get kstack");
			/*
			 * The pager may have substituted a different page
			 * for this index, so look it up again.
			 */
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		/* Wire the page so it cannot be paged out while in use. */
		vm_page_wire(m);
		/* Clear the busy state set by vm_page_grab(). */
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	/* Re-establish the kernel mappings for the whole stack. */
	pmap_qenter(ks, ma, pages);
}
1686
1687
/*
 * Release a kernel stack's pages for pageout: mark each page dirty
 * (its contents must survive the swap-out), unwire it, and remove
 * the kernel mappings for the stack range.
 */
void
pmap_swapout_thread(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = (vm_offset_t)td->td_kstack;
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("pmap_swapout_thread: kstack already missing?");
		vm_page_lock_queues();
		/* Force the pageout daemon to write the stack contents. */
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	/*
	 * NOTE(review): the pages are unwired while still mapped;
	 * pmap_qremove() runs only after the loop.  Presumably safe
	 * because the thread is being swapped out and the stack is
	 * unused — confirm against the pageout daemon's expectations.
	 */
	pmap_qremove(ks, pages);
}
1711
1712/*
1713 * Allocate a physical page of memory directly from the phys_avail map.
1714 * Can only be called from pmap_bootstrap before avail start and end are
1715 * calculated.
1716 */
1717static vm_offset_t
1718pmap_bootstrap_alloc(vm_size_t size, u_int align)
1719{
1720 vm_offset_t s, e;

--- 767 unchanged lines hidden ---
1560 * Allocate a physical page of memory directly from the phys_avail map.
1561 * Can only be called from pmap_bootstrap before avail start and end are
1562 * calculated.
1563 */
1564static vm_offset_t
1565pmap_bootstrap_alloc(vm_size_t size, u_int align)
1566{
1567 vm_offset_t s, e;

--- 767 unchanged lines hidden ---