Lines matching refs: domain

69 static int domain_unmap_buf_locked(struct dmar_domain *domain,
163 domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr)
176 for (i = 0; i < domain->pglvl; i++) {
177 if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
194 dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
199 domain->pglvl = tbl->pglvl; /* XXXKIB ? */
213 dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
218 domain->pglvl = tbl->pglvl; /* XXXKIB ? */
227 tbl->pglvl = domain->pglvl;
254 unit = domain->dmar;
323 domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
326 base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
337 domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
342 KASSERT(lvl >= 0 && lvl < domain->pglvl,
343 ("wrong lvl %p %d", domain, lvl));
346 idx = domain_pgtbl_pte_off(domain, base, i) +
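
The two helpers above boil down to shift-and-mask arithmetic on the DMA address: the PTE index at a given level is one 9-bit field of the address, and the page index of the table page follows from chaining those fields level by level. Below is a minimal userland sketch of the per-level index calculation, assuming 4 KB page-table pages and 512 entries per page; the constant names and the standalone function are illustrative, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4 KB page-table pages */
#define NPTEPGSHIFT	9	/* assumed: 512 PTEs per page-table page */
#define PTEMASK		((1u << NPTEPGSHIFT) - 1)

/*
 * Index of the PTE covering 'base' within its page-table page at level
 * 'lvl', counting level 0 as the root and pglvl - 1 as the leaf level.
 */
static unsigned
pte_off(int pglvl, uint64_t base, int lvl)
{

	base >>= PAGE_SHIFT + (pglvl - lvl - 1) * NPTEPGSHIFT;
	return (base & PTEMASK);
}

int
main(void)
{
	uint64_t base = 0x12345678000ULL;
	int lvl;

	/* Which slot does the address select at each of four levels? */
	for (lvl = 0; lvl < 4; lvl++)
		printf("lvl %d -> index %u\n", lvl, pte_off(4, base, lvl));
	return (0);
}
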
353 domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
361 DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
364 idx = domain_pgtbl_get_pindex(domain, base, lvl);
372 pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
375 ("lost root page table page %p", domain));
381 m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
396 ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
400 ("loosing root page %p", domain));
402 dmar_pgfree(domain->pgtbl_obj, m->pindex,
408 dmar_flush_pte_to_ram(domain->dmar, ptep);
416 pte += domain_pgtbl_pte_off(domain, base, lvl);
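
domain_pgtbl_map_pte() may have to materialize the intermediate page-table page before it can hand back a PTE pointer, creating and linking pages level by level. The toy userland sketch below shows that lazy descent over a pointer-based radix tree, under assumed parameters; it ignores the kernel's pgtbl_obj/pindex bookkeeping, sf_buf mappings, page wiring, and cache flushes.

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT	12		/* assumed 4 KB pages */
#define NPTEPGSHIFT	9
#define NPTEPG		(1 << NPTEPGSHIFT)

struct ptpage {
	struct ptpage	*child[NPTEPG];	/* lower-level tables (non-leaf) */
	uint64_t	 pte[NPTEPG];	/* leaf PTEs */
};

/*
 * Walk from the root toward level 'lvl', allocating any missing
 * intermediate table on the way, and return the table page holding the
 * PTE for 'base' at that level (NULL if an allocation fails).
 */
static struct ptpage *
map_pte_page(struct ptpage *root, int pglvl, uint64_t base, int lvl)
{
	struct ptpage *pg;
	unsigned idx;
	int i;

	for (pg = root, i = 0; i < lvl; i++) {
		idx = (base >> (PAGE_SHIFT + (pglvl - i - 1) * NPTEPGSHIFT)) &
		    (NPTEPG - 1);
		if (pg->child[idx] == NULL) {
			pg->child[idx] = calloc(1, sizeof(struct ptpage));
			if (pg->child[idx] == NULL)
				return (NULL);
		}
		pg = pg->child[idx];
	}
	return (pg);
}

int
main(void)
{
	struct ptpage *root, *leaf;
	uint64_t base = 0x12345678000ULL;

	root = calloc(1, sizeof(struct ptpage));
	if (root == NULL)
		return (1);
	leaf = map_pte_page(root, 4, base, 3);
	if (leaf != NULL)	/* install a toy read/write leaf PTE */
		leaf->pte[(base >> PAGE_SHIFT) & (NPTEPG - 1)] = base | 0x3;
	return (0);
}
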
421 domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
431 DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
441 pg_sz = domain_page_size(domain, lvl);
443 if (lvl == domain->pglvl - 1)
449 if (!domain_is_sp_lvl(domain, lvl))
473 ("mapping loop overflow %p %jx %jx %jx", domain,
476 pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
479 ("failed waitable pte alloc %p", domain));
482 domain_unmap_buf_locked(domain, base1, base - base1,
489 dmar_flush_pte_to_ram(domain->dmar, pte);
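
The mapping loop above advances through the request in chunks, and for each chunk picks the deepest level usable as a leaf: the last level always qualifies, while an intermediate (superpage) level only qualifies if the hardware supports it and the DMA address, the physical address, and the remaining contiguous run are all aligned to that level's page size. The following is a hedged sketch of that selection; the superpage-capability test is a stand-in assumption, not the kernel's capability check.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define NPTEPGSHIFT	9

/* Bytes covered by one leaf PTE installed at 'lvl' (level 0 is the root). */
static uint64_t
page_size(int pglvl, int lvl)
{

	return ((uint64_t)1 << (PAGE_SHIFT + (pglvl - lvl - 1) * NPTEPGSHIFT));
}

/* Assumed stand-in for the hardware superpage capability check. */
static bool
is_sp_lvl(int pglvl, int lvl)
{

	return (lvl == pglvl - 2);	/* pretend only 2 MB superpages exist */
}

/*
 * Choose the level for the next leaf PTE: DMA address 'base', physical
 * address 'pa', and 'run' contiguous bytes still to be mapped.
 */
static int
pick_level(int pglvl, uint64_t base, uint64_t pa, uint64_t run)
{
	uint64_t pg_sz;
	int lvl;

	for (lvl = 0; lvl < pglvl; lvl++) {
		pg_sz = page_size(pglvl, lvl);
		if (lvl == pglvl - 1)
			break;		/* smallest page always works */
		if (!is_sp_lvl(pglvl, lvl))
			continue;
		if ((base & (pg_sz - 1)) == 0 && (pa & (pg_sz - 1)) == 0 &&
		    run >= pg_sz)
			break;
	}
	return (lvl);
}

int
main(void)
{

	printf("aligned 2 MB run maps at level %d\n",
	    pick_level(4, 0x200000, 0x400000, 2 * 1024 * 1024));
	printf("unaligned run maps at level %d\n",
	    pick_level(4, 0x201000, 0x400000, 2 * 1024 * 1024));
	return (0);
}
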
499 domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
505 unit = domain->dmar;
507 KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
508 ("modifying idmap pagetable domain %p", domain));
510 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
513 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
515 KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
517 KASSERT(base < (1ULL << domain->agaw),
518 ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
519 (uintmax_t)size, domain->agaw));
520 KASSERT(base + size < (1ULL << domain->agaw),
521 ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
522 (uintmax_t)size, domain->agaw));
524 ("size overflow %p %jx %jx", domain, (uintmax_t)base,
534 domain, (uintmax_t)pflags));
538 domain, (uintmax_t)pflags));
541 DMAR_DOMAIN_PGLOCK(domain);
542 error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
543 DMAR_DOMAIN_PGUNLOCK(domain);
548 domain_flush_iotlb_sync(domain, base, size);
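
Before taking the page-table lock, domain_map_buf() (and the unmap path below, which repeats the same assertions) sanity-checks the request: page-aligned base and size, a non-empty range, no wraparound, and both ends below the domain's 1 << agaw address limit. The KASSERTs above translate to roughly the following plain-C checks; the 4 KB page size and the agaw value in the example are assumptions.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL		/* assumed DMAR page size */

/* Validate a map/unmap request against an address width of 'agaw' bits. */
static void
check_request(uint64_t base, uint64_t size, int agaw)
{

	assert((base & (PAGE_SIZE - 1)) == 0);	/* page-aligned base */
	assert((size & (PAGE_SIZE - 1)) == 0);	/* page-aligned size */
	assert(size > 0);			/* non-empty range */
	assert(base + size > base);		/* no address wraparound */
	assert(base < (1ULL << agaw));		/* base below the limit */
	assert(base + size < (1ULL << agaw));	/* end below the limit */
}

int
main(void)
{

	check_request(0x100000, 0x200000, 39);	/* 2 MB request at 1 MB, 39-bit agaw */
	return (0);
}
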
558 static void domain_unmap_clear_pte(struct dmar_domain *domain,
563 domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
571 pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
572 domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
576 domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
582 dmar_flush_pte_to_ram(domain->dmar, pte);
592 ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
593 domain, (uintmax_t)base, lvl));
595 ("lost reference (idx) on root pg domain %p base %jx lvl %d",
596 domain, (uintmax_t)base, lvl));
597 dmar_pgfree(domain->pgtbl_obj, m->pindex, flags);
598 domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
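
When clearing a PTE drops the last reference on its page-table page, the page itself is freed and the entry pointing to it at the parent level is cleared in turn, which can cascade toward (but never including) the root. Below is a toy sketch of that cascade with small malloc-backed tables; the kernel keys the decision off the vm_page reference count and frees by pindex, which the toy replaces with a simple per-table use counter.

#include <stdlib.h>

#define NPTEPG	8	/* deliberately tiny tables to keep the toy short */

struct pt {
	struct pt	*parent;	/* NULL for the root table */
	unsigned	 slot;		/* index of this table in its parent */
	unsigned	 used;		/* number of valid entries */
	void		*entry[NPTEPG];	/* child tables, or leaf mappings */
};

/*
 * Clear entry 'idx' in table 'pt'.  If the table becomes empty and is
 * not the root, free it and clear the parent's entry, cascading upward.
 */
static void
clear_entry(struct pt *pt, unsigned idx)
{
	struct pt *parent;

	while (pt != NULL) {
		pt->entry[idx] = NULL;
		if (--pt->used != 0 || pt->parent == NULL)
			break;
		parent = pt->parent;
		idx = pt->slot;
		free(pt);
		pt = parent;
	}
}

int
main(void)
{
	struct pt *root, *leaf;

	root = calloc(1, sizeof(*root));
	leaf = calloc(1, sizeof(*leaf));
	if (root == NULL || leaf == NULL)
		return (1);
	leaf->parent = root;
	leaf->slot = 0;
	root->entry[0] = leaf;
	root->used = 1;
	leaf->entry[3] = (void *)0x1;	/* one toy leaf mapping */
	leaf->used = 1;

	clear_entry(leaf, 3);	/* empties the leaf, frees it, clears root slot */
	free(root);
	return (0);
}
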
605 domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
614 DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
618 KASSERT((domain->flags & DMAR_DOMAIN_IDMAP) == 0,
619 ("modifying idmap pagetable domain %p", domain));
621 ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
624 ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
626 KASSERT(base < (1ULL << domain->agaw),
627 ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
628 (uintmax_t)size, domain->agaw));
629 KASSERT(base + size < (1ULL << domain->agaw),
630 ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
631 (uintmax_t)size, domain->agaw));
633 ("size overflow %p %jx %jx", domain, (uintmax_t)base,
642 for (lvl = 0; lvl < domain->pglvl; lvl++) {
643 if (lvl != domain->pglvl - 1 &&
644 !domain_is_sp_lvl(domain, lvl))
646 pg_sz = domain_page_size(domain, lvl);
649 pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
653 domain, (uintmax_t)base, lvl, flags));
655 lvl == domain->pglvl - 1) {
656 domain_unmap_clear_pte(domain, base, lvl,
662 ("unmapping loop overflow %p %jx %jx %jx", domain,
677 domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
682 DMAR_DOMAIN_PGLOCK(domain);
683 error = domain_unmap_buf_locked(domain, base, size, flags);
684 DMAR_DOMAIN_PGUNLOCK(domain);
689 domain_alloc_pgtbl(struct dmar_domain *domain)
693 KASSERT(domain->pgtbl_obj == NULL,
694 ("already initialized %p", domain));
696 domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
697 IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
698 DMAR_DOMAIN_PGLOCK(domain);
699 m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK |
703 DMAR_DOMAIN_PGUNLOCK(domain);
704 DMAR_LOCK(domain->dmar);
705 domain->flags |= DMAR_DOMAIN_PGTBL_INITED;
706 DMAR_UNLOCK(domain->dmar);
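
The backing VM object allocated above is sized from pglvl_max_pages(), presumably the worst-case number of page-table pages a domain->pglvl-deep tree can consume: one root page, then up to N children per page at each further level. The quick illustration below computes that sum, assuming 512 entries per page; it is not the kernel's helper.

#include <stdint.h>
#include <stdio.h>

#define NPTEPG	512ULL	/* assumed page-table entries per page */

/*
 * Worst-case page-table page count for a 'pglvl'-level tree:
 * 1 + NPTEPG + NPTEPG^2 + ... + NPTEPG^(pglvl - 1).
 */
static uint64_t
max_pgtbl_pages(int pglvl)
{
	uint64_t total, level_pages;
	int lvl;

	for (total = 0, level_pages = 1, lvl = 0; lvl < pglvl; lvl++) {
		total += level_pages;
		level_pages *= NPTEPG;
	}
	return (total);
}

int
main(void)
{

	printf("4-level worst case: %ju page-table pages\n",
	    (uintmax_t)max_pgtbl_pages(4));
	return (0);
}
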
711 domain_free_pgtbl(struct dmar_domain *domain)
716 obj = domain->pgtbl_obj;
718 KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
719 (domain->flags & DMAR_DOMAIN_IDMAP) != 0,
720 ("lost pagetable object domain %p", domain));
723 DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
724 domain->pgtbl_obj = NULL;
726 if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0) {
728 domain->flags &= ~DMAR_DOMAIN_IDMAP;
757 domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
765 unit = domain->dmar;
772 DMAR_IOTLB_DID(domain->domain), iro);
783 DMAR_IOTLB_DID(domain->domain), iro);
792 * address space for the domain.