Lines Matching refs:bo

47 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
59 memset(bo, 0, sizeof(*bo));
60 mutex_init(&bo->mutex);
62 /* init the bo->list HEAD as an element of entire_bo_list */
63 INIT_LIST_HEAD(&bo->list);
65 bo->bdev = bdev;
66 bo->vmap_addr = NULL;
67 bo->status = HMM_BO_FREE;
68 bo->start = bdev->start;
69 bo->pgnr = pgnr;
70 bo->end = bo->start + pgnr_to_size(pgnr);
71 bo->prev = NULL;
72 bo->next = NULL;
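__bo_init() carves the bo's ISP virtual range out of the device: [bo->start, bo->end) covers pgnr pages starting at bdev->start. A minimal sketch of the conversion it relies on, assuming pgnr_to_size() is the usual page-count-to-bytes shift (example_pgnr_to_size below is a hypothetical stand-in, not the driver's helper):

#include <linux/mm.h>	/* PAGE_SHIFT */

/* Hypothetical stand-in for pgnr_to_size(); assumption: a page count is
 * turned into a byte length by shifting with PAGE_SHIFT. */
static inline unsigned long example_pgnr_to_size(unsigned int pgnr)
{
        return (unsigned long)pgnr << PAGE_SHIFT;
}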
114 /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
132 struct hmm_buffer_object *bo;
135 bo = rb_entry(n, struct hmm_buffer_object, node);
137 if (bo->start > start) {
141 } else if (bo->start < start) {
146 return bo;
157 struct hmm_buffer_object *bo;
160 bo = rb_entry(n, struct hmm_buffer_object, node);
162 if (bo->start > start) {
167 if (bo->end > start)
168 return bo;
179 struct hmm_buffer_object *bo)
184 unsigned int pgnr = bo->pgnr;
195 bo->prev = this;
196 bo->next = this->next;
198 this->next->prev = bo;
199 this->next = bo;
200 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
205 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
207 rb_link_node(&bo->node, parent, new);
208 rb_insert_color(&bo->node, root);
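__bo_insert_to_free_rbtree() keys the free tree by pgnr. When a free bo of the same size is already in the tree, the new bo is chained behind that node through the prev/next pointers instead of getting its own rbtree node, which is what lines 195-199 do. A stand-alone sketch of that chaining rule with a simplified struct (not the driver's types):

#include <stddef.h>

/* Simplified stand-in for hmm_buffer_object: only the fields the chain uses. */
struct free_bo {
        unsigned int pgnr;              /* size key used by the free rbtree */
        struct free_bo *prev, *next;    /* equal-size bos chained behind a node */
};

/* Chain 'bo' right behind 'this', the rbtree node already holding this pgnr. */
static void chain_behind_node(struct free_bo *this, struct free_bo *bo)
{
        bo->prev = this;
        bo->next = this->next;
        if (this->next)
                this->next->prev = bo;
        this->next = bo;
}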
212 struct hmm_buffer_object *bo)
217 unsigned int start = bo->start;
229 kref_init(&bo->kref);
230 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
232 rb_link_node(&bo->node, parent, new);
233 rb_insert_color(&bo->node, root);
237 struct hmm_buffer_object *bo,
256 new_bo->start = bo->start;
258 bo->start = new_bo->end;
259 bo->pgnr = bo->pgnr - pgnr;
262 list_add_tail(&new_bo->list, &bo->list);
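__bo_break_up() satisfies an allocation that is smaller than the free bo found in the tree by splitting the front pgnr pages off into new_bo and shrinking the original bo to the tail of the range. A stand-alone worked example of that arithmetic (simplified struct, 4 KiB pages assumed):

#include <stdio.h>

#define PG_SHIFT 12	/* assume 4 KiB pages for the worked numbers */

/* Simplified stand-in for hmm_buffer_object: just the address range. */
struct range_bo { unsigned long start, end; unsigned int pgnr; };

/* Split the first 'pgnr' pages off 'bo' and return them as a new bo;
 * 'bo' keeps the tail of the range, mirroring the assignments above. */
static struct range_bo break_up(struct range_bo *bo, unsigned int pgnr)
{
        struct range_bo new_bo;

        new_bo.start = bo->start;			/* new bo takes the front */
        new_bo.pgnr  = pgnr;
        new_bo.end   = new_bo.start + ((unsigned long)pgnr << PG_SHIFT);

        bo->start = new_bo.end;				/* old bo keeps the tail */
        bo->pgnr -= pgnr;

        return new_bo;
}

int main(void)
{
        struct range_bo bo = { 0x10000000, 0x10000000 + (8UL << PG_SHIFT), 8 };
        struct range_bo head = break_up(&bo, 3);

        /* prints: head 0x10000000-0x10003000 (3), rest 0x10003000-0x10008000 (5) */
        printf("head %#lx-%#lx (%u), rest %#lx-%#lx (%u)\n",
               head.start, head.end, head.pgnr, bo.start, bo.end, bo.pgnr);
        return 0;
}

In the driver, new_bo additionally comes from the bo_cache slab and is linked into entire_bo_list just in front of bo (line 262), which keeps that list sorted by address.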
268 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
270 struct hmm_bo_device *bdev = bo->bdev;
271 /* There are 4 situations when we take off a known bo from the free rbtree:
272 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
273 * and does not have a linked list after it; to take off this bo,
274 * we just need to erase bo directly and rebalance the free rbtree
276 if (!bo->prev && !bo->next) {
277 rb_erase(&bo->node, &bdev->free_rbtree);
278 /* 2. when bo->next != NULL && bo->prev == NULL, bo is an rbtree node,
279 * and has a linked list; to take off this bo we need to erase bo
280 * first, then insert bo->next into the free rbtree and rebalance
283 } else if (!bo->prev && bo->next) {
284 bo->next->prev = NULL;
285 rb_erase(&bo->node, &bdev->free_rbtree);
286 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
287 bo->next = NULL;
288 /* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
289 * node; bo is the last element of the linked list after the rbtree
290 * node, to take off this bo, we just need to set the "prev/next"
293 } else if (bo->prev && !bo->next) {
294 bo->prev->next = NULL;
295 bo->prev = NULL;
296 /* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
297 * node; bo is in the middle of the linked list after the rbtree node,
298 * to take off this bo, we just set the "prev/next" pointers
301 } else if (bo->prev && bo->next) {
302 bo->next->prev = bo->prev;
303 bo->prev->next = bo->next;
304 bo->next = NULL;
305 bo->prev = NULL;
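The four cases above reduce to ordinary doubly-linked-list unlinking, plus an rbtree erase (and, in case 2, re-insertion of the successor) whenever the removed bo is the node that represents its size class. A stand-alone sketch of the pointer handling, with the tree maintenance stubbed out:

#include <stddef.h>

struct chain_bo { struct chain_bo *prev, *next; };

/* Stand-ins for the tree maintenance; the driver uses rb_erase() and
 * __bo_insert_to_free_rbtree() at these points. */
static void tree_erase(struct chain_bo *bo)  { (void)bo; }
static void tree_insert(struct chain_bo *bo) { (void)bo; }

static void take_off(struct chain_bo *bo)
{
        if (!bo->prev && !bo->next) {		/* 1: lone tree node */
                tree_erase(bo);
        } else if (!bo->prev && bo->next) {	/* 2: tree node with a chain */
                bo->next->prev = NULL;
                tree_erase(bo);
                tree_insert(bo->next);		/* successor becomes the node */
                bo->next = NULL;
        } else if (bo->prev && !bo->next) {	/* 3: tail of the chain */
                bo->prev->next = NULL;
                bo->prev = NULL;
        } else {				/* 4: middle of the chain */
                bo->next->prev = bo->prev;
                bo->prev->next = bo->next;
                bo->next = NULL;
                bo->prev = NULL;
        }
}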
309 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
315 bdev = bo->bdev;
316 next_bo->start = bo->start;
317 next_bo->pgnr = next_bo->pgnr + bo->pgnr;
320 list_del(&bo->list);
323 kmem_cache_free(bo->bdev->bo_cache, bo);
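__bo_merge() is the inverse of the break-up: the second argument absorbs the first, extending next_bo backwards and freeing the old bo. A companion sketch to the break-up example above (same simplified struct):

/* Companion to the break-up sketch: on merge the second bo absorbs the
 * first, so next_bo is the survivor. */
struct range_bo { unsigned long start, end; unsigned int pgnr; };

static struct range_bo *merge(struct range_bo *bo, struct range_bo *next_bo)
{
        next_bo->start = bo->start;	/* extend next_bo backwards over bo */
        next_bo->pgnr += bo->pgnr;
        /* the driver additionally unlinks bo from entire_bo_list and returns
         * it to the bo_cache slab here */
        return next_bo;
}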
336 struct hmm_buffer_object *bo;
369 bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
370 if (!bo) {
376 ret = __bo_init(bdev, bo, bdev->pgnr);
379 kmem_cache_free(bdev->bo_cache, bo);
385 list_add_tail(&bo->list, &bdev->entire_bo_list);
388 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
396 struct hmm_buffer_object *bo, *new_bo;
409 bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
410 if (!bo) {
417 if (bo->pgnr > pgnr) {
418 new_bo = __bo_break_up(bdev, bo, pgnr);
427 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
433 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
436 return bo;
439 void hmm_bo_release(struct hmm_buffer_object *bo)
441 struct hmm_bo_device *bdev = bo->bdev;
449 * how to destroy the bo when it is still MMAPED?
456 if (bo->status & HMM_BO_MMAPED) {
458 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
462 if (bo->status & HMM_BO_BINDED) {
463 dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n");
464 hmm_bo_unbind(bo);
467 if (bo->status & HMM_BO_PAGE_ALLOCED) {
469 hmm_bo_free_pages(bo);
471 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
473 hmm_bo_vunmap(bo);
476 rb_erase(&bo->node, &bdev->allocated_rbtree);
478 prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
479 next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
481 if (bo->list.prev != &bdev->entire_bo_list &&
482 prev_bo->end == bo->start &&
485 bo = __bo_merge(prev_bo, bo);
488 if (bo->list.next != &bdev->entire_bo_list &&
489 next_bo->start == bo->end &&
492 bo = __bo_merge(bo, next_bo);
495 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
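On release the bo goes back onto the free rbtree, but only after trying to coalesce with its neighbours in entire_bo_list. A hypothetical predicate spelling out the conditions checked before each merge above: the neighbour's range must touch the bo's, and (in the driver) the neighbour must also still be free; that status half of the test is not visible in these matches, so it is written here as an assumption.

#include <linux/types.h>

/* Hypothetical helper: 'neighbour' is the adjacent entry of entire_bo_list
 * on either side of 'bo'; the HMM_BO_FREE status check is assumed. */
static bool example_can_merge(struct hmm_buffer_object *bo,
                              struct hmm_buffer_object *neighbour,
                              bool neighbour_is_prev)
{
        bool contiguous = neighbour_is_prev ? neighbour->end == bo->start
                                            : bo->end == neighbour->start;

        return contiguous &&
               (neighbour->status & HMM_BO_MASK) == HMM_BO_FREE;
}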
503 struct hmm_buffer_object *bo;
512 * and all bos will be merged into a big bo
523 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
526 list_del(&bo->list);
529 kmem_cache_free(bdev->bo_cache, bo);
546 int hmm_bo_allocated(struct hmm_buffer_object *bo)
548 check_bo_null_return(bo, 0);
550 return bo->status & HMM_BO_ALLOCED;
556 struct hmm_buffer_object *bo;
561 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
562 if (!bo) {
564 dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
570 return bo;
576 struct hmm_buffer_object *bo;
581 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
582 if (!bo) {
584 dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
590 return bo;
597 struct hmm_buffer_object *bo;
604 bo = list_to_hmm_bo(pos);
605 /* skip a bo which has no vm_node allocated */
606 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
608 if (bo->vmap_addr == vaddr)
615 return bo;
626 static void free_private_bo_pages(struct hmm_buffer_object *bo)
628 set_pages_array_wb(bo->pages, bo->pgnr);
629 free_pages_bulk_array(bo->pgnr, bo->pages);
633 static int alloc_private_pages(struct hmm_buffer_object *bo)
638 ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
639 if (ret != bo->pgnr) {
640 free_pages_bulk_array(ret, bo->pages);
645 ret = set_pages_array_uc(bo->pages, bo->pgnr);
648 free_pages_bulk_array(bo->pgnr, bo->pages);
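alloc_private_pages() above allocates the whole page array in one bulk call, treats a short allocation as failure (freeing whatever was obtained), and then flips the pages to uncached. A condensed sketch of that shape; the helpers are the ones visible in the matches, while the gfp flags and error codes are assumptions.

#include <linux/gfp.h>
#include <asm/set_memory.h>	/* set_pages_array_uc() (x86) */

/* Condensed sketch of the private-page allocation pattern above. */
static int example_alloc_device_pages(struct hmm_buffer_object *bo)
{
        unsigned long got;
        int ret;

        got = alloc_pages_bulk_array(GFP_KERNEL, bo->pgnr, bo->pages);
        if (got != bo->pgnr) {
                /* short allocation: give back what was obtained and fail */
                free_pages_bulk_array(got, bo->pages);
                return -ENOMEM;
        }

        /* switch the pages to uncached for the device; undo the allocation
         * if changing the memory type fails */
        ret = set_pages_array_uc(bo->pages, bo->pgnr);
        if (ret) {
                free_pages_bulk_array(bo->pgnr, bo->pages);
                return ret;
        }

        return 0;
}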
655 static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr)
660 for (i = 0; i < bo->pgnr; i++) {
661 bo->pages[i] = vmalloc_to_page(vaddr);
662 if (!bo->pages[i]) {
673 * allocate/free physical pages for the bo.
680 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
686 check_bo_null_return(bo, -EINVAL);
688 mutex_lock(&bo->mutex);
689 check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
691 bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
692 if (unlikely(!bo->pages)) {
698 ret = alloc_private_pages(bo);
700 ret = alloc_vmalloc_pages(bo, vmalloc_addr);
708 bo->type = type;
710 bo->status |= HMM_BO_PAGE_ALLOCED;
712 mutex_unlock(&bo->mutex);
717 kfree(bo->pages);
718 mutex_unlock(&bo->mutex);
722 mutex_unlock(&bo->mutex);
729 * free physical pages of the bo.
731 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
733 check_bo_null_return_void(bo);
735 mutex_lock(&bo->mutex);
737 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
740 bo->status &= (~HMM_BO_PAGE_ALLOCED);
742 if (bo->type == HMM_BO_PRIVATE)
743 free_private_bo_pages(bo);
744 else if (bo->type == HMM_BO_VMALLOC)
749 kfree(bo->pages);
750 mutex_unlock(&bo->mutex);
755 mutex_unlock(&bo->mutex);
760 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
762 check_bo_null_return(bo, 0);
764 return bo->status & HMM_BO_PAGE_ALLOCED;
770 int hmm_bo_bind(struct hmm_buffer_object *bo)
777 check_bo_null_return(bo, -EINVAL);
779 mutex_lock(&bo->mutex);
781 check_bo_status_yes_goto(bo,
785 check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
787 bdev = bo->bdev;
789 virt = bo->start;
791 for (i = 0; i < bo->pgnr; i++) {
794 page_to_phys(bo->pages[i]), 1);
811 if (bo->start != 0x0)
812 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
813 (bo->pgnr << PAGE_SHIFT));
815 bo->status |= HMM_BO_BINDED;
817 mutex_unlock(&bo->mutex);
823 virt = bo->start;
829 mutex_unlock(&bo->mutex);
835 mutex_unlock(&bo->mutex);
839 mutex_unlock(&bo->mutex);
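hmm_bo_bind() walks the page array, maps one page of ISP virtual address space per iteration, and then flushes the ISP TLB for the whole range (lines 811-813). If a page fails to map, everything mapped so far has to be torn down again. A schematic of that map-with-rollback shape; map_one() and unmap_one() are hypothetical stand-ins for the ISP MMU calls, whose exact signatures are only partially visible in these matches.

#include <linux/mm.h>	/* PAGE_SIZE; page_to_phys() comes from the arch headers */

/* Schematic of the bind loop's rollback structure. */
static int example_bind(struct hmm_buffer_object *bo,
                        int (*map_one)(unsigned long virt, phys_addr_t phys),
                        void (*unmap_one)(unsigned long virt))
{
        unsigned long virt = bo->start;
        unsigned int i;
        int ret;

        for (i = 0; i < bo->pgnr; i++) {
                ret = map_one(virt, page_to_phys(bo->pages[i]));
                if (ret)
                        goto rollback;
                virt += PAGE_SIZE;
        }
        return 0;		/* the driver then flushes the ISP TLB range */

rollback:
        /* unmap every page that was mapped before the failure */
        while (virt > bo->start) {
                virt -= PAGE_SIZE;
                unmap_one(virt);
        }
        return ret;
}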
848 void hmm_bo_unbind(struct hmm_buffer_object *bo)
854 check_bo_null_return_void(bo);
856 mutex_lock(&bo->mutex);
858 check_bo_status_yes_goto(bo,
863 bdev = bo->bdev;
865 virt = bo->start;
867 for (i = 0; i < bo->pgnr; i++) {
876 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
877 (bo->pgnr << PAGE_SHIFT));
879 bo->status &= (~HMM_BO_BINDED);
881 mutex_unlock(&bo->mutex);
886 mutex_unlock(&bo->mutex);
891 int hmm_bo_binded(struct hmm_buffer_object *bo)
895 check_bo_null_return(bo, 0);
897 mutex_lock(&bo->mutex);
899 ret = bo->status & HMM_BO_BINDED;
901 mutex_unlock(&bo->mutex);
906 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
908 check_bo_null_return(bo, NULL);
910 mutex_lock(&bo->mutex);
911 if (((bo->status & HMM_BO_VMAPED) && !cached) ||
912 ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
913 mutex_unlock(&bo->mutex);
914 return bo->vmap_addr;
918 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
919 vunmap(bo->vmap_addr);
920 bo->vmap_addr = NULL;
921 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
924 bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
926 if (unlikely(!bo->vmap_addr)) {
927 mutex_unlock(&bo->mutex);
931 bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
933 mutex_unlock(&bo->mutex);
934 return bo->vmap_addr;
937 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
939 check_bo_null_return_void(bo);
941 mutex_lock(&bo->mutex);
942 if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
943 mutex_unlock(&bo->mutex);
947 clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
948 mutex_unlock(&bo->mutex);
951 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
953 check_bo_null_return_void(bo);
955 mutex_lock(&bo->mutex);
956 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
957 vunmap(bo->vmap_addr);
958 bo->vmap_addr = NULL;
959 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
962 mutex_unlock(&bo->mutex);
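Taken together, the three calls above give the typical CPU-access pattern: map the bo cached, write into it, flush so the ISP sees the data, then drop the mapping. example_fill_bo() below is a hypothetical caller; only hmm_bo_vmap(), hmm_bo_flush_vmap() and hmm_bo_vunmap() come from the matches.

#include <linux/mm.h>		/* PAGE_SHIFT */
#include <linux/string.h>	/* memcpy() */

/* Hypothetical caller: write CPU data into a bo through a cached mapping,
 * then flush so the device sees it. */
static int example_fill_bo(struct hmm_buffer_object *bo,
                           const void *src, size_t len)
{
        void *va;

        if (len > (size_t)bo->pgnr << PAGE_SHIFT)	/* don't overrun the bo */
                return -EINVAL;

        va = hmm_bo_vmap(bo, true);	/* cached kernel mapping of bo->pages */
        if (!va)
                return -ENOMEM;

        memcpy(va, src, len);
        hmm_bo_flush_vmap(bo);		/* clflush the cached range for the ISP */
        hmm_bo_vunmap(bo);
        return 0;
}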
966 void hmm_bo_ref(struct hmm_buffer_object *bo)
968 check_bo_null_return_void(bo);
970 kref_get(&bo->kref);
981 void hmm_bo_unref(struct hmm_buffer_object *bo)
983 check_bo_null_return_void(bo);
985 kref_put(&bo->kref, kref_hmm_bo_release);
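hmm_bo_unref() drops the reference taken by kref_init()/hmm_bo_ref(); when the count hits zero, kref_put() invokes the release callback. A sketch of what kref_hmm_bo_release() presumably looks like, assuming the usual container_of() pattern from the embedded kref back to the bo:

#include <linux/kref.h>

/* Sketch of the release callback handed to kref_put() above; the driver's
 * kref_hmm_bo_release() presumably has this shape. */
static void example_kref_release(struct kref *kref)
{
        struct hmm_buffer_object *bo =
                container_of(kref, struct hmm_buffer_object, kref);

        hmm_bo_release(bo);	/* the teardown shown earlier in the matches */
}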
990 struct hmm_buffer_object *bo =
993 check_bo_null_return_void(bo);
995 hmm_bo_ref(bo);
997 mutex_lock(&bo->mutex);
999 bo->status |= HMM_BO_MMAPED;
1001 bo->mmap_count++;
1003 mutex_unlock(&bo->mutex);
1008 struct hmm_buffer_object *bo =
1011 check_bo_null_return_void(bo);
1013 hmm_bo_unref(bo);
1015 mutex_lock(&bo->mutex);
1017 bo->mmap_count--;
1019 if (!bo->mmap_count) {
1020 bo->status &= (~HMM_BO_MMAPED);
1024 mutex_unlock(&bo->mutex);
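The two VMA handlers around lines 990 and 1008 pin the bo for the lifetime of a user mapping: open takes a reference and marks the bo mmaped, close drops it and clears the flag once mmap_count reaches zero. A hedged sketch of how such handlers are typically wired into a vm_operations_struct; the handler and table names below are assumptions (they are not visible in these matches), and the bo is recovered from vma->vm_private_data, which hmm_bo_mmap() sets at line 1072.

#include <linux/mm.h>

/* Assumed handler names; the matches only show the bodies that take/drop
 * the bo reference and adjust mmap_count. */
static void example_bo_vm_open(struct vm_area_struct *vma)
{
        struct hmm_buffer_object *bo = vma->vm_private_data;

        hmm_bo_ref(bo);		/* as in the handler at line 990 above */
        /* the driver also sets HMM_BO_MMAPED and increments bo->mmap_count
         * under bo->mutex here */
}

static void example_bo_vm_close(struct vm_area_struct *vma)
{
        struct hmm_buffer_object *bo = vma->vm_private_data;

        hmm_bo_unref(bo);	/* as in the handler at line 1008 above */
        /* the driver also decrements bo->mmap_count and clears HMM_BO_MMAPED
         * once the last mapping is gone */
}

static const struct vm_operations_struct example_bo_vm_ops = {
        .open  = example_bo_vm_open,
        .close = example_bo_vm_close,
};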
1033 * mmap the bo to user space.
1035 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1042 check_bo_null_return(bo, -EINVAL);
1044 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1046 pgnr = bo->pgnr;
1062 pfn = page_to_pfn(bo->pages[i]);
1072 vma->vm_private_data = bo;
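hmm_bo_mmap() exposes the bo's pages to user space and stashes the bo in vma->vm_private_data for the open/close handlers. A hypothetical helper illustrating the per-page mapping that the page_to_pfn() call at line 1062 implies; remap_pfn_range() is an assumption here, since the driver's actual remap call is not visible in these matches.

#include <linux/mm.h>

/* Hypothetical per-page user mapping of the bo's page array. */
static int example_map_bo_pages(struct vm_area_struct *vma,
                                struct hmm_buffer_object *bo)
{
        unsigned long start = vma->vm_start;
        unsigned int i;

        for (i = 0; i < bo->pgnr; i++) {
                unsigned long pfn = page_to_pfn(bo->pages[i]);

                if (remap_pfn_range(vma, start, pfn, PAGE_SIZE,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                start += PAGE_SIZE;
        }
        return 0;
}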