Lines Matching refs:kva

253 	uint8_t			*kva;
257 * of this request's kva region.
526 * \brief Array of memoized bounce buffer kva offsets used
597 * Global pool of kva used for mapping remote domain ring
600 vm_offset_t kva;
602 /** Pseudo-physical address corresponding to kva. */
605 /** The size of the global kva pool. */
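
A hedged sketch of how the three pool fields documented at 597-605 fit together on one instance. Only the kva declaration at 600 is actually quoted; the structure name and the other field types here are assumptions for illustration:

	#include <sys/types.h>	/* vm_offset_t, vm_size_t (FreeBSD) */
	#include <stdint.h>

	struct kva_pool_sketch {
		vm_offset_t	kva;		/* Base of the global kva pool. */
		uint64_t	gnt_base_addr;	/* Pseudo-physical address corresponding to kva. */
		vm_size_t	kva_size;	/* Size of the global kva pool, in bytes. */
	};
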
758 * associated with our per-instance kva region.
879 * calculate an offset into a request's kva region.
881 * \param reqlist The request structure whose kva region will be accessed.
882 * \param pagenr The page index used to compute the kva offset.
884 * kva offset.
891 return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
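
The doc comment at 879-884 and the return at 891 describe simple address arithmetic: the page number selects a page within the request's kva region, and the sector index (512-byte units) selects the offset inside that page. A minimal standalone sketch of the same computation; the helper name and the PAGE_SIZE definition are assumptions:

	#include <stdint.h>

	#define PAGE_SIZE	4096	/* assumed page size */

	static inline uint8_t *
	reqlist_vaddr_sketch(uint8_t *reqlist_kva, int pagenr, int sector)
	{
		/* reqlist_kva stands in for the reqlist->kva quoted above. */
		return (reqlist_kva + (PAGE_SIZE * pagenr) + (sector << 9));
	}
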
964 (uintptr_t)(reqlist->kva - xbb->kva) +
1035 free_kva = xbb->kva +
1038 KASSERT(free_kva >= (uint8_t *)xbb->kva &&
1042 "kva = %#jx, ring VA = %#jx\n", free_kva,
1043 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
1075 start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
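
The matches at 1035 and 1075 show the two directions of the pool arithmetic: allocation turns a free page index into an address inside the pool, and release shifts the byte offset back down to recover the starting page index. A self-contained sketch of that round trip; the function and parameter names are illustrative:

	#include <stdint.h>

	#define PAGE_SHIFT	12		/* assumed */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* Allocation direction: free page index -> address inside the pool. */
	static uint8_t *
	pool_page_to_kva(uint8_t *pool_base, unsigned long first_page)
	{
		return (pool_base + first_page * PAGE_SIZE);
	}

	/* Release direction: address inside the pool -> starting page index. */
	static unsigned long
	pool_kva_to_page(uint8_t *pool_base, uint8_t *kva_ptr)
	{
		return ((unsigned long)(kva_ptr - pool_base) >> PAGE_SHIFT);
	}
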
1132 reqlist->kva = NULL;
1163 if (reqlist->kva != NULL)
1164 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1474 memcpy((uint8_t *)reqlist->kva + kva_offset,
1530 reqlist->kva = NULL;
1532 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1533 if (reqlist->kva == NULL) {
2194 (uint8_t *)reqlist->kva + kva_offset,
2790 if (xbb->kva != 0) {
2792 kmem_free(kernel_map, xbb->kva, xbb->kva_size);
2802 xbb->kva = 0;
2911 * Kva for our ring is at the tail of the region of kva allocated
2914 xbb->ring_config.va = xbb->kva
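
Per the comment at 2911, the shared ring's kva sits at the tail of the region allocated for the instance. A hypothetical illustration of that layout; kva and kva_size appear in the matches above, while the ring_pages parameter and the exact expression are assumptions, since the quoted line at 2914 is truncated:

	#include <sys/types.h>	/* vm_offset_t, vm_size_t, u_int (FreeBSD) */

	#define PAGE_SIZE	4096	/* assumed */

	static vm_offset_t
	ring_va_at_tail(vm_offset_t kva, vm_size_t kva_size, u_int ring_pages)
	{
		/* The ring occupies the last ring_pages pages of the pool. */
		return (kva + kva_size - (vm_size_t)ring_pages * PAGE_SIZE);
	}
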
3031 xbb->kva = kmem_alloc_nofault(kernel_map, xbb->kva_size);
3032 if (xbb->kva == 0)
3034 xbb->gnt_base_addr = xbb->kva;
3038 * into kva. These pages will only be backed by machine
3048 xbb->kva = 0;
3051 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
3055 DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
3056 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,