26c26
< * $FreeBSD: head/sys/sparc64/include/tlb.h 93687 2002-04-02 17:50:13Z tmm $
---
> * $FreeBSD: head/sys/sparc64/include/tlb.h 96998 2002-05-20 16:10:17Z jake $
86,91c86,88
< /*
< * Some tlb operations must be atomic, so no interrupt or trap can be allowed
< * while they are in progress. Traps should not happen, but interrupts need to
< * be explicitly disabled. critical_enter() cannot be used here, since it only
< * disables soft interrupts.
< */
---
> void tlb_context_demap(struct pmap *pm);
> void tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va);
> void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end);
93,186d89
< static __inline void
< tlb_context_demap(struct pmap *pm)
< {
< void *cookie;
< u_long s;
<
< /*
< * It is important that we are not interrupted or preempted while
< * doing the IPIs. The interrupted CPU may hold locks, and since
< * it will wait for the CPU that sent the IPI, this can lead
< * to a deadlock when an interrupt comes in on that CPU and its
< * handler tries to grab one of those locks. This will only happen for
< * spin locks, but these IPI types are delivered even if normal
< * interrupts are disabled, so the lock critical section will not
< * protect the target processor from entering the IPI handler with
< * the lock held.
< */
< critical_enter();
< cookie = ipi_tlb_context_demap(pm);
< if (pm->pm_active & PCPU_GET(cpumask)) {
< KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
< ("tlb_context_demap: inactive pmap?"));
< s = intr_disable();
< stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
< stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
< membar(Sync);
< intr_restore(s);
< }
< ipi_wait(cookie);
< critical_exit();
< }
<
< static __inline void
< tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
< {
< u_long flags;
< void *cookie;
< u_long s;
<
< critical_enter();
< cookie = ipi_tlb_page_demap(tlb, pm, va);
< if (pm->pm_active & PCPU_GET(cpumask)) {
< KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
< ("tlb_page_demap: inactive pmap?"));
< if (pm == kernel_pmap)
< flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
< else
< flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
<
< s = intr_disable();
< if (tlb & TLB_DTLB) {
< stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
< membar(Sync);
< }
< if (tlb & TLB_ITLB) {
< stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
< membar(Sync);
< }
< intr_restore(s);
< }
< ipi_wait(cookie);
< critical_exit();
< }
<
< static __inline void
< tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
< {
< vm_offset_t va;
< void *cookie;
< u_long flags;
< u_long s;
<
< critical_enter();
< cookie = ipi_tlb_range_demap(pm, start, end);
< if (pm->pm_active & PCPU_GET(cpumask)) {
< KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
< ("tlb_range_demap: inactive pmap?"));
< if (pm == kernel_pmap)
< flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
< else
< flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
<
< s = intr_disable();
< for (va = start; va < end; va += PAGE_SIZE) {
< stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
< stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
< membar(Sync);
< }
< intr_restore(s);
< }
< ipi_wait(cookie);
< critical_exit();
< }
<
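The net effect of this revision is that the demap primitives are no longer defined inline in tlb.h; only the three prototypes remain in the header, and the function bodies presumably move to an out-of-line implementation (the destination file, likely sys/sparc64/sparc64/tlb.c, is not shown in this diff). As a rough sketch, carrying the deleted body over unchanged, the out-of-line form of tlb_context_demap() would look like this:

/*
 * Sketch only: the former inline body of tlb_context_demap(), recast as an
 * out-of-line definition.  Where it actually lands and any further changes
 * made there are not visible in this diff.
 */
void
tlb_context_demap(struct pmap *pm)
{
	void *cookie;
	u_long s;

	critical_enter();
	cookie = ipi_tlb_context_demap(pm);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
		    ("tlb_context_demap: inactive pmap?"));
		s = intr_disable();
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
		membar(Sync);
		intr_restore(s);
	}
	ipi_wait(cookie);
	critical_exit();
}

Callers in pmap.c and elsewhere are unaffected by the move: they continue to call tlb_context_demap(), tlb_page_demap(), and tlb_range_demap() through the prototypes now declared in the header, the only difference being an ordinary function call instead of inlined code.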