Lines Matching refs:bdev

47 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
50 check_bodev_null_return(bdev, -EINVAL);
51 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
65 bo->bdev = bdev;
68 bo->start = bdev->start;
111 rb_erase(&this->node, &this->bdev->free_rbtree);
236 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
244 new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
249 ret = __bo_init(bdev, new_bo, pgnr);
252 kmem_cache_free(bdev->bo_cache, new_bo);
261 spin_lock_irqsave(&bdev->list_lock, flags);
263 spin_unlock_irqrestore(&bdev->list_lock, flags);
270 struct hmm_bo_device *bdev = bo->bdev;
277 rb_erase(&bo->node, &bdev->free_rbtree);
285 rb_erase(&bo->node, &bdev->free_rbtree);
286 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
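The __bo_break_up() matches above show how an oversized free buffer object is split when a smaller allocation is requested: a second object is taken from bdev->bo_cache, initialised for the requested page count, and linked next to the original on the device-wide list under list_lock, after which the caller re-files both pieces (see the hmm_bo_alloc() matches further down). A hedged sketch of that split, reconstructed from the matched lines; the start/pgnr fix-up and the exact list operation between the lock/unlock pair do not appear in the listing and are assumptions:

    static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
                                                   struct hmm_buffer_object *bo,
                                                   unsigned int pgnr)
    {
        struct hmm_buffer_object *new_bo;
        unsigned long flags;
        int ret;

        /* carve a new object for the first pgnr pages of bo's range */
        new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
        if (!new_bo)
            return NULL;

        ret = __bo_init(bdev, new_bo, pgnr);
        if (ret) {
            kmem_cache_free(bdev->bo_cache, new_bo);
            return NULL;
        }

        /* assumed fix-up: new_bo takes the head of the range, bo keeps the tail */
        new_bo->start = bo->start;
        bo->start += pgnr_to_size(pgnr);
        bo->pgnr -= pgnr;

        /* assumed list op: keep entire_bo_list ordered by start address */
        spin_lock_irqsave(&bdev->list_lock, flags);
        list_add_tail(&new_bo->list, &bo->list);
        spin_unlock_irqrestore(&bdev->list_lock, flags);

        return new_bo;
    }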
312 struct hmm_bo_device *bdev;
315 bdev = bo->bdev;
319 spin_lock_irqsave(&bdev->list_lock, flags);
321 spin_unlock_irqrestore(&bdev->list_lock, flags);
323 kmem_cache_free(bo->bdev->bo_cache, bo);
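The short group above (source lines 312-323) is the helper that destroys a buffer object outright: it unlinks the object from entire_bo_list under list_lock and hands the memory back to the bo_cache slab. A minimal sketch; the helper's name and the list_del() between the lock/unlock pair are assumptions:

    static void kfree_hmm_bo(struct hmm_buffer_object *bo)   /* name assumed */
    {
        struct hmm_bo_device *bdev;
        unsigned long flags;

        bdev = bo->bdev;

        spin_lock_irqsave(&bdev->list_lock, flags);
        list_del(&bo->list);                                  /* assumed */
        spin_unlock_irqrestore(&bdev->list_lock, flags);

        kmem_cache_free(bo->bdev->bo_cache, bo);
    }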
331 int hmm_bo_device_init(struct hmm_bo_device *bdev,
340 check_bodev_null_return(bdev, -EINVAL);
342 ret = isp_mmu_init(&bdev->mmu, mmu_driver);
348 bdev->start = vaddr_start;
349 bdev->pgnr = size_to_pgnr_ceil(size);
350 bdev->size = pgnr_to_size(bdev->pgnr);
352 spin_lock_init(&bdev->list_lock);
353 mutex_init(&bdev->rbtree_mutex);
355 bdev->flag = HMM_BO_DEVICE_INITED;
357 INIT_LIST_HEAD(&bdev->entire_bo_list);
358 bdev->allocated_rbtree = RB_ROOT;
359 bdev->free_rbtree = RB_ROOT;
361 bdev->bo_cache = kmem_cache_create("bo_cache",
363 if (!bdev->bo_cache) {
365 isp_mmu_exit(&bdev->mmu);
369 bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
372 isp_mmu_exit(&bdev->mmu);
376 ret = __bo_init(bdev, bo, bdev->pgnr);
379 kmem_cache_free(bdev->bo_cache, bo);
380 isp_mmu_exit(&bdev->mmu);
384 spin_lock_irqsave(&bdev->list_lock, flags);
385 list_add_tail(&bo->list, &bdev->entire_bo_list);
386 spin_unlock_irqrestore(&bdev->list_lock, flags);
388 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
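Read together, the hmm_bo_device_init() matches trace the whole bring-up order: attach the ISP MMU, record the managed virtual range, initialise the locks, the device-wide list and the two rbtrees, create the slab cache for buffer objects, and finally seed the free rbtree with a single object spanning the entire range. A condensed sketch of that sequence; the parameter types, error codes, error unwinding and the kmem_cache_create() arguments truncated in the listing are assumptions:

    int hmm_bo_device_init(struct hmm_bo_device *bdev,
                           struct isp_mmu_client *mmu_driver,   /* type assumed */
                           unsigned int vaddr_start, unsigned int size)
    {
        struct hmm_buffer_object *bo;
        unsigned long flags;
        int ret;

        check_bodev_null_return(bdev, -EINVAL);

        ret = isp_mmu_init(&bdev->mmu, mmu_driver);
        if (ret)
            return ret;

        bdev->start = vaddr_start;
        bdev->pgnr = size_to_pgnr_ceil(size);
        bdev->size = pgnr_to_size(bdev->pgnr);

        spin_lock_init(&bdev->list_lock);
        mutex_init(&bdev->rbtree_mutex);
        bdev->flag = HMM_BO_DEVICE_INITED;

        INIT_LIST_HEAD(&bdev->entire_bo_list);
        bdev->allocated_rbtree = RB_ROOT;
        bdev->free_rbtree = RB_ROOT;

        /* slab cache for struct hmm_buffer_object; arguments assumed */
        bdev->bo_cache = kmem_cache_create("bo_cache", sizeof(*bo), 0, 0, NULL);
        if (!bdev->bo_cache) {
            isp_mmu_exit(&bdev->mmu);
            return -ENOMEM;
        }

        /* one free object covering the whole range seeds the allocator */
        bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
        if (!bo) {
            isp_mmu_exit(&bdev->mmu);
            return -ENOMEM;
        }

        ret = __bo_init(bdev, bo, bdev->pgnr);
        if (ret) {
            kmem_cache_free(bdev->bo_cache, bo);
            isp_mmu_exit(&bdev->mmu);
            return ret;
        }

        spin_lock_irqsave(&bdev->list_lock, flags);
        list_add_tail(&bo->list, &bdev->entire_bo_list);
        spin_unlock_irqrestore(&bdev->list_lock, flags);

        __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

        return 0;
    }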
393 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
397 struct rb_root *root = &bdev->free_rbtree;
399 check_bodev_null_return(bdev, NULL);
400 var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
408 mutex_lock(&bdev->rbtree_mutex);
411 mutex_unlock(&bdev->rbtree_mutex);
418 new_bo = __bo_break_up(bdev, bo, pgnr);
420 mutex_unlock(&bdev->rbtree_mutex);
426 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
427 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
429 mutex_unlock(&bdev->rbtree_mutex);
433 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
435 mutex_unlock(&bdev->rbtree_mutex);
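The hmm_bo_alloc() matches show the allocator's fast path: take rbtree_mutex, find a free node with at least pgnr pages, split it with __bo_break_up() if it is larger than needed, and file the piece being handed out in the allocated rbtree. A hedged sketch of the locked section; the pgnr parameter type and the name and exact behaviour of the free-tree search helper are assumptions (only the rbtree roots, the split and the insert calls appear in the matches):

    struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
                                           unsigned int pgnr)
    {
        struct hmm_buffer_object *bo, *new_bo;
        struct rb_root *root = &bdev->free_rbtree;

        check_bodev_null_return(bdev, NULL);
        /* var_equal_return(hmm_bo_device_inited(bdev), 0, NULL, ...) also
         * guards this path; its message is truncated in the listing */

        mutex_lock(&bdev->rbtree_mutex);

        /* helper name assumed: pick a free node with at least pgnr pages
         * and take it out of the free rbtree */
        bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
        if (!bo) {
            mutex_unlock(&bdev->rbtree_mutex);
            return NULL;
        }

        if (bo->pgnr > pgnr) {
            /* carve the requested pages off the front of the free node */
            new_bo = __bo_break_up(bdev, bo, pgnr);
            if (!new_bo) {
                mutex_unlock(&bdev->rbtree_mutex);
                return NULL;
            }

            __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
            __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

            mutex_unlock(&bdev->rbtree_mutex);
            return new_bo;
        }

        /* exact fit: hand out the node as-is */
        __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

        mutex_unlock(&bdev->rbtree_mutex);
        return bo;
    }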
441 struct hmm_bo_device *bdev = bo->bdev;
444 mutex_lock(&bdev->rbtree_mutex);
457 mutex_unlock(&bdev->rbtree_mutex);
476 rb_erase(&bo->node, &bdev->allocated_rbtree);
481 if (bo->list.prev != &bdev->entire_bo_list &&
488 if (bo->list.next != &bdev->entire_bo_list &&
495 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
497 mutex_unlock(&bdev->rbtree_mutex);
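The matches from the release path mirror the allocator: under rbtree_mutex the object is erased from the allocated rbtree, its neighbours on the address-ordered entire_bo_list are examined so that adjacent free objects can be coalesced, and the result is inserted into the free rbtree. A hedged sketch of that free/merge step; the enclosing function name, the adjacency and status tests, and the merge helper are assumptions, only the rbtree and list manipulation is visible in the matches:

    void hmm_bo_release(struct hmm_buffer_object *bo)         /* name assumed */
    {
        struct hmm_bo_device *bdev = bo->bdev;
        struct hmm_buffer_object *prev_bo, *next_bo;

        mutex_lock(&bdev->rbtree_mutex);

        rb_erase(&bo->node, &bdev->allocated_rbtree);

        /* coalesce with the previous object if it is free and contiguous;
         * the status/end fields and __bo_merge() are assumed */
        if (bo->list.prev != &bdev->entire_bo_list) {
            prev_bo = list_to_hmm_bo(bo->list.prev);
            if (prev_bo->status == HMM_BO_FREE && prev_bo->end == bo->start) {
                rb_erase(&prev_bo->node, &bdev->free_rbtree);
                bo = __bo_merge(prev_bo, bo);
            }
        }

        /* and likewise with the next object */
        if (bo->list.next != &bdev->entire_bo_list) {
            next_bo = list_to_hmm_bo(bo->list.next);
            if (next_bo->status == HMM_BO_FREE && next_bo->start == bo->end) {
                rb_erase(&next_bo->node, &bdev->free_rbtree);
                bo = __bo_merge(bo, next_bo);
            }
        }

        __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

        mutex_unlock(&bdev->rbtree_mutex);
    }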
501 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
508 check_bodev_null_return_void(bdev);
514 while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
516 rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
522 while (!list_empty(&bdev->entire_bo_list)) {
523 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
525 spin_lock_irqsave(&bdev->list_lock, flags);
527 spin_unlock_irqrestore(&bdev->list_lock, flags);
529 kmem_cache_free(bdev->bo_cache, bo);
534 kmem_cache_destroy(bdev->bo_cache);
536 isp_mmu_exit(&bdev->mmu);
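hmm_bo_device_exit() undoes the init sequence: anything still sitting in the allocated rbtree is released first, then every object remaining on entire_bo_list is unlinked under list_lock and returned to the slab cache, and finally the cache and the ISP MMU are torn down. A condensed sketch; the release call used for leaked allocations and the list_del() are assumptions:

    void hmm_bo_device_exit(struct hmm_bo_device *bdev)
    {
        struct hmm_buffer_object *bo;
        unsigned long flags;

        check_bodev_null_return_void(bdev);

        /* release whatever users leaked; release helper assumed */
        while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
            hmm_bo_release(
                rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

        /* only free objects remain on the device-wide list now */
        while (!list_empty(&bdev->entire_bo_list)) {
            bo = list_to_hmm_bo(bdev->entire_bo_list.next);

            spin_lock_irqsave(&bdev->list_lock, flags);
            list_del(&bo->list);                              /* assumed */
            spin_unlock_irqrestore(&bdev->list_lock, flags);

            kmem_cache_free(bdev->bo_cache, bo);
        }

        kmem_cache_destroy(bdev->bo_cache);

        isp_mmu_exit(&bdev->mmu);
    }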
539 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
541 check_bodev_null_return(bdev, -EINVAL);
543 return bdev->flag == HMM_BO_DEVICE_INITED;
554 struct hmm_bo_device *bdev, ia_css_ptr vaddr)
558 check_bodev_null_return(bdev, NULL);
560 mutex_lock(&bdev->rbtree_mutex);
561 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
563 mutex_unlock(&bdev->rbtree_mutex);
568 mutex_unlock(&bdev->rbtree_mutex);
574 struct hmm_bo_device *bdev, unsigned int vaddr)
578 check_bodev_null_return(bdev, NULL);
580 mutex_lock(&bdev->rbtree_mutex);
581 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
583 mutex_unlock(&bdev->rbtree_mutex);
588 mutex_unlock(&bdev->rbtree_mutex);
594 struct hmm_bo_device *bdev, const void *vaddr)
600 check_bodev_null_return(bdev, NULL);
602 spin_lock_irqsave(&bdev->list_lock, flags);
603 list_for_each(pos, &bdev->entire_bo_list) {
611 spin_unlock_irqrestore(&bdev->list_lock, flags);
614 spin_unlock_irqrestore(&bdev->list_lock, flags);
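The three lookup helpers use the two protection schemes visible in the matches: searches keyed on an ISP virtual address (ia_css_ptr) walk the allocated rbtree under rbtree_mutex via __bo_search_by_addr() / __bo_search_by_addr_in_range(), while the lookup keyed on a kernel virtual address scans entire_bo_list under the list_lock spinlock. A hedged sketch of the list-walk variant; the function name, the vmap_addr field and the match condition are assumptions:

    struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
        struct hmm_bo_device *bdev, const void *vaddr)        /* name assumed */
    {
        struct list_head *pos;
        struct hmm_buffer_object *bo;
        unsigned long flags;

        check_bodev_null_return(bdev, NULL);

        spin_lock_irqsave(&bdev->list_lock, flags);
        list_for_each(pos, &bdev->entire_bo_list) {
            bo = list_to_hmm_bo(pos);
            /* match on the vmap'ed kernel address; field name assumed */
            if (bo->vmap_addr == vaddr) {
                spin_unlock_irqrestore(&bdev->list_lock, flags);
                return bo;
            }
        }
        spin_unlock_irqrestore(&bdev->list_lock, flags);

        return NULL;
    }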
774 struct hmm_bo_device *bdev;
787 bdev = bo->bdev;
793 isp_mmu_map(&bdev->mmu, virt,
812 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
825 isp_mmu_unmap(&bdev->mmu, virt, 1);
851 struct hmm_bo_device *bdev;
863 bdev = bo->bdev;
868 isp_mmu_unmap(&bdev->mmu, virt, 1);
876 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
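The last two groups of matches belong to the bind/unbind paths: binding walks the object's pages and maps each one into the ISP MMU with isp_mmu_map(), unmapping the already-mapped pages with isp_mmu_unmap() if a step fails, and both directions end with isp_mmu_flush_tlb_range() over the object's virtual range. A hedged sketch of the bind loop; the function name, the page array and the size passed to the TLB flush are assumptions, only the isp_mmu_* calls and bo->start come from the matches:

    int hmm_bo_bind(struct hmm_buffer_object *bo)             /* name assumed */
    {
        struct hmm_bo_device *bdev = bo->bdev;
        unsigned int virt = bo->start;
        unsigned int i;
        int ret;

        for (i = 0; i < bo->pgnr; i++) {
            /* one ISP page per iteration; pages[] layout assumed */
            ret = isp_mmu_map(&bdev->mmu, virt,
                              page_to_phys(bo->pages[i]), 1);
            if (ret)
                goto map_err;

            virt += pgnr_to_size(1);
        }

        isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
                                pgnr_to_size(bo->pgnr));      /* size assumed */
        return 0;

    map_err:
        /* unwind the pages that were already mapped */
        while (i--) {
            virt -= pgnr_to_size(1);
            isp_mmu_unmap(&bdev->mmu, virt, 1);
        }
        return ret;
    }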