Lines matching defs:umem in /freebsd-12-stable/sys/ofed/drivers/infiniband/core/

51 	/* Only update private counters for this umem if it has them.
52 	 * Otherwise skip it. All page faults will be delayed for this umem. */
69 	/* Only update private counters for this umem if it has them.
70 	 * Otherwise skip it. All page faults will be delayed for this umem. */
134 	/* Make sure that the fact the umem is dying is out before we release
135 	 * all pending page faults. */
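
The comments above (source lines 51-52 and 69-70) describe the per-umem
invalidation accounting that on-demand-paging (ODP) page faults check against.
A minimal userspace sketch of that pattern follows; the struct and function
names are hypothetical stand-ins for the kernel's notifiers_count/notifiers_seq
fields and the ib_umem_mmu_notifier_retry() check, not the real API.

#include <stdbool.h>

/* Hypothetical stand-in for the counters kept per umem. */
struct odp_counters {
	unsigned int notifiers_count;	/* invalidations currently in flight */
	unsigned int notifiers_seq;	/* bumped each time one completes */
};

/* An MMU-notifier invalidation is starting on this umem. */
static void notifier_start_account(struct odp_counters *c)
{
	c->notifiers_count++;
}

/* The invalidation finished: bump the sequence so a page fault that
 * sampled it earlier knows its pages may have been freed meanwhile. */
static void notifier_end_account(struct odp_counters *c)
{
	c->notifiers_seq++;
	c->notifiers_count--;
}

/* Fault side: sample the sequence first, do the slow pinning work, then
 * call this under the umem mutex before committing the mapping. */
static bool notifier_retry(const struct odp_counters *c, unsigned int seen_seq)
{
	return c->notifiers_count != 0 || c->notifiers_seq != seen_seq;
}
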
245 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
264 umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
265 if (!umem->odp_data) {
269 umem->odp_data->umem = umem;
271 mutex_init(&umem->odp_data->umem_mutex);
273 init_completion(&umem->odp_data->notifier_completion);
275 umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
276 sizeof(*umem->odp_data->page_list));
277 if (!umem->odp_data->page_list) {
282 umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
283 sizeof(*umem->odp_data->dma_list));
284 if (!umem->odp_data->dma_list) {
296 if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
297 rbt_ib_umem_insert(&umem->odp_data->interval_tree,
301 umem->odp_data->mn_counters_active = true;
303 list_add(&umem->odp_data->no_private_counters,
340 vfree(umem->odp_data->dma_list);
342 vfree(umem->odp_data->page_list);
344 kfree(umem->odp_data);
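
Source lines 264-284 above build the ODP bookkeeping in three steps: a zeroed
control block (kzalloc), then one array entry per umem page for the pinned
pages and one for their DMA addresses (vzalloc), with lines 340-344 unwinding
on failure. A standalone sketch of the same allocate-and-unwind shape, using
calloc in place of kzalloc/vzalloc and hypothetical type names:

#include <stdlib.h>

struct odp_data {
	void **page_list;		/* one pinned page per umem page */
	unsigned long long *dma_list;	/* packed DMA address + access bits */
	size_t npages;
};

static struct odp_data *odp_data_alloc(size_t npages)
{
	struct odp_data *odp = calloc(1, sizeof(*odp));

	if (!odp)
		return NULL;
	odp->npages = npages;

	odp->page_list = calloc(npages, sizeof(*odp->page_list));
	if (!odp->page_list)
		goto out_odp;

	odp->dma_list = calloc(npages, sizeof(*odp->dma_list));
	if (!odp->dma_list)
		goto out_page_list;

	return odp;

out_page_list:
	free(odp->page_list);
out_odp:
	free(odp);
	return NULL;
}
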
350 void ib_umem_odp_release(struct ib_umem *umem)
352 struct ib_ucontext *context = umem->context;
355 * Ensure that no more pages are mapped in the umem.
360 ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
361 ib_umem_end(umem));
364 if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
365 rbt_ib_umem_remove(&umem->odp_data->interval_tree,
368 if (!umem->odp_data->mn_counters_active) {
369 list_del(&umem->odp_data->no_private_counters);
370 complete_all(&umem->odp_data->notifier_completion);
411 vfree(umem->odp_data->dma_list);
412 vfree(umem->odp_data->page_list);
413 kfree(umem->odp_data);
414 kfree(umem);
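
ib_umem_odp_release above tears down in strict reverse order of setup: unmap
every page first (lines 360-361), drop the interval-tree entry (lines 364-365),
settle the notifier counters, and only then free the arrays and the control
block (lines 411-414). The matching free for the odp_data_alloc sketch above,
again with hypothetical names:

/* Pairs with odp_data_alloc; in the kernel, nothing may still be
 * DMA-mapped or reachable via the interval tree at this point. */
static void odp_data_free(struct odp_data *odp)
{
	free(odp->dma_list);
	free(odp->page_list);
	free(odp);
}
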
420 * @umem: the umem to insert the page to.
421 * @page_index: index in the umem to add the page to.
426 * umem->odp_data->notifiers_seq.
433 * umem.
436 struct ib_umem *umem,
443 struct ib_device *dev = umem->context->device;
454 if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
458 if (!(umem->odp_data->dma_list[page_index])) {
467 umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
468 umem->odp_data->page_list[page_index] = page;
470 } else if (umem->odp_data->page_list[page_index] == page) {
471 umem->odp_data->dma_list[page_index] |= access_mask;
474 umem->odp_data->page_list[page_index], page);
482 if (umem->context->invalidate_range || !stored_page)
485 if (remove_existing_mapping && umem->context->invalidate_range) {
487 umem,
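
Source lines 454-474 above are the commit step for a single faulted page: bail
out if an invalidation raced with us, store the page and its DMA address on
first fault, widen the access bits if the same page faults again, and treat a
different page behind an already-mapped index as an error. Because the DMA
address is page-aligned, its low bits are free to carry the access flags,
which is why dma_list can pack both into one word. A sketch building on the
odp_data and odp_counters stand-ins above, with hypothetical access bits:

#include <errno.h>

#define ODP_READ_BIT	(1ULL << 0)	/* hypothetical access flags */
#define ODP_WRITE_BIT	(1ULL << 1)

/* Called under the umem mutex, mirroring source lines 454-474. */
static int map_single_page(struct odp_data *odp, struct odp_counters *c,
			   size_t idx, void *page,
			   unsigned long long dma_addr,
			   unsigned long long access_mask,
			   unsigned int seen_seq)
{
	if (notifier_retry(c, seen_seq))
		return -EAGAIN;		/* an invalidation raced; re-fault */

	if (!odp->dma_list[idx]) {
		/* First fault on this page: record address + access bits. */
		odp->dma_list[idx] = dma_addr | access_mask;
		odp->page_list[idx] = page;
	} else if (odp->page_list[idx] == page) {
		/* Same page faulted again: just widen the allowed access. */
		odp->dma_list[idx] |= access_mask;
	} else {
		/* A different page behind a live mapping is a hard error. */
		return -EFAULT;
	}
	return 0;
}
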
502 * umem->odp_data->dma_list.
509 * @umem: the umem to map and pin
519 * umem->odp_data->notifiers_seq before calling this function
521 int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
535 if (user_virt < ib_umem_start(umem) ||
536 user_virt + bcnt > ib_umem_end(umem))
548 owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
563 start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
589 mutex_lock(&umem->odp_data->umem_mutex);
592 umem, k, base_virt_addr, local_page_list[j],
598 mutex_unlock(&umem->odp_data->umem_mutex);
624 void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
629 struct ib_device *dev = umem->context->device;
631 virt = max_t(u64, virt, ib_umem_start(umem));
632 bound = min_t(u64, bound, ib_umem_end(umem));
638 mutex_lock(&umem->odp_data->umem_mutex);
639 for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
640 idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
641 if (umem->odp_data->page_list[idx]) {
642 struct page *page = umem->odp_data->page_list[idx];
643 dma_addr_t dma = umem->odp_data->dma_list[idx];
664 if (!umem->context->invalidate_range)
666 umem->odp_data->page_list[idx] = NULL;
667 umem->odp_data->dma_list[idx] = 0;
670 mutex_unlock(&umem->odp_data->umem_mutex);
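
ib_umem_odp_unmap_dma_pages above first clips the requested range to the umem
(the max_t/min_t pair at source lines 631-632), then walks it a page at a
time, deriving each index from the offset against the umem start (line 640)
and clearing both bookkeeping entries together (lines 666-667). The same
clamp-and-walk in the sketch vocabulary used above:

static void unmap_range(struct odp_data *odp,
			unsigned long long umem_start,
			unsigned long long umem_end,
			unsigned long long virt, unsigned long long bound)
{
	unsigned long long addr;

	/* Clamp the caller's range to what this umem actually covers. */
	if (virt < umem_start)
		virt = umem_start;
	if (bound > umem_end)
		bound = umem_end;

	for (addr = virt; addr < bound; addr += ODP_PAGE_SIZE) {
		size_t idx = (addr - umem_start) / ODP_PAGE_SIZE;

		if (!odp->page_list[idx])
			continue;	/* never faulted in; nothing to undo */

		/* The kernel DMA-unmaps and releases the page here. */
		odp->page_list[idx] = NULL;
		odp->dma_list[idx] = 0;
	}
}
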