Lines Matching refs:glob (cross-reference hits for glob, the struct ttm_bo_global pointer used throughout the TTM buffer-object code; each entry below is prefixed with its line number in the source file)

45 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
96 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
101 (unsigned long) atomic_read(&glob->bo_count));
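
File lines 96 and 101 are the read-only attribute callback that reports how many buffer objects are alive globally. A minimal sketch of that callback, with the lines between the two listed fragments filled in as an assumption:

static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
    struct attribute *attr, char *buffer)
{
	/* Report the number of live buffer objects tracked by this global. */
	return snprintf(buffer, PAGE_SIZE, "%lu\n",
	    (unsigned long) atomic_read(&glob->bo_count));
}
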
125 atomic_dec(&bo->glob->bo_count);
131 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
149 ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
174 list_add_tail(&bo->swap, &bo->glob->swap_lru);
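
File line 149 sleeps on the buffer object with the global lru_lock as the msleep interlock, and line 174 queues the object on the global swap LRU. A minimal sketch of the wait helper around line 149; the helper name, the wait-message strings, and the interruptible handling are assumptions:

static int
ttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
{
	const char *wmsg;
	int flags, ret;

	ret = 0;
	if (interruptible) {
		flags = PCATCH;		/* allow signals to abort the sleep */
		wmsg = "ttbowi";
	} else {
		flags = 0;
		wmsg = "ttbowu";
	}
	/* msleep() atomically drops and retakes bo->glob->lru_lock. */
	while (ttm_bo_is_reserved(bo)) {
		ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
		if (ret != 0)
			break;
	}
	return (ret);
}
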
282 struct ttm_bo_global *glob = bo->glob;
286 mtx_lock(&bo->glob->lru_lock);
291 mtx_unlock(&glob->lru_lock);
294 mtx_unlock(&bo->glob->lru_lock);
336 struct ttm_bo_global *glob = bo->glob;
339 mtx_lock(&glob->lru_lock);
343 mtx_unlock(&glob->lru_lock);
346 mtx_unlock(&glob->lru_lock);
359 struct ttm_bo_global *glob = bo->glob;
361 mtx_lock(&glob->lru_lock);
363 mtx_unlock(&glob->lru_lock);
372 struct ttm_bo_global *glob = bo->glob;
388 page_flags, glob->dummy_read_page);
395 glob->dummy_read_page);
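
File lines 372 to 395 hand glob->dummy_read_page to the driver's ttm_tt_create hook when a buffer object gets its page table; the dummy page backs reads of not-yet-populated pages. A heavily trimmed sketch of that path; apart from the listed arguments, the function name, flag handling, and switch structure are assumptions:

static int
ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	uint32_t page_flags = 0;
	int ret = 0;

	bo->ttm = NULL;
	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* FALLTHROUGH */
	case ttm_bo_type_kernel:
		/* The shared dummy page backs unpopulated reads of the ttm. */
		bo->ttm = bdev->driver->ttm_tt_create(bdev,
		    bo->num_pages << PAGE_SHIFT, page_flags,
		    glob->dummy_read_page);
		if (bo->ttm == NULL)
			ret = -ENOMEM;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return (ret);
}
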
552 struct ttm_bo_global *glob = bo->glob;
558 mtx_lock(&glob->lru_lock);
567 mtx_unlock(&glob->lru_lock);
585 mtx_unlock(&glob->lru_lock);
613 struct ttm_bo_global *glob = bo->glob;
633 mtx_unlock(&glob->lru_lock);
650 mtx_lock(&glob->lru_lock);
662 mtx_unlock(&glob->lru_lock);
671 mtx_unlock(&glob->lru_lock);
679 mtx_unlock(&glob->lru_lock);
694 struct ttm_bo_global *glob = bdev->glob;
698 mtx_lock(&glob->lru_lock);
725 mtx_unlock(&glob->lru_lock);
734 mtx_lock(&glob->lru_lock);
740 mtx_unlock(&glob->lru_lock);
865 struct ttm_bo_global *glob = bdev->glob;
870 mtx_lock(&glob->lru_lock);
878 mtx_unlock(&glob->lru_lock);
893 mtx_unlock(&glob->lru_lock);
1231 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1264 bo->glob = bdev->glob;
1280 atomic_inc(&bo->glob->bo_count);
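
File lines 1231, 1264, and 1280 are the accounting side of object creation: the object's bookkeeping size is charged to the memory global and the shared bo_count is bumped, while lines 125 and 131 earlier in the listing undo both in the destroy path. A sketch of how the two ends pair up; the surrounding control flow is an assumption:

/* Creation (around file lines 1231-1280): charge the accounting global
 * for acc_size, then make the new object visible in the global count. */
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret == 0) {
	bo->glob = bdev->glob;
	atomic_inc(&bo->glob->bo_count);
}

/* Destruction (file lines 125-131): the same two counters are unwound. */
atomic_dec(&bo->glob->bo_count);
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
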
1366 struct ttm_bo_global *glob = bdev->glob;
1373 mtx_lock(&glob->lru_lock);
1375 mtx_unlock(&glob->lru_lock);
1384 mtx_lock(&glob->lru_lock);
1386 mtx_unlock(&glob->lru_lock);
1471 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
1474 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1475 vm_page_free(glob->dummy_read_page);
1480 struct ttm_bo_global *glob = ref->object;
1482 if (refcount_release(&glob->kobj_ref))
1483 ttm_bo_global_kobj_release(glob);
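
File lines 1471 to 1483 are the teardown side of the global state: the reference-release callback drops kobj_ref and, on the last reference, unregisters the swapout shrinker and frees the dummy read page. A sketch assembling those fragments; the drm_global_reference callback name and wiring are assumptions:

static void
ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
{
	/* Stop the swapout shrinker before freeing the state it uses. */
	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	vm_page_free(glob->dummy_read_page);
}

void
ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	/* Only the last reference performs the actual teardown. */
	if (refcount_release(&glob->kobj_ref))
		ttm_bo_global_kobj_release(glob);
}
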
1490 struct ttm_bo_global *glob = ref->object;
1493 sx_init(&glob->device_list_mutex, "ttmdlm");
1494 mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
1495 glob->mem_glob = bo_ref->mem_glob;
1496 glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
1500 if (unlikely(glob->dummy_read_page == NULL)) {
1505 INIT_LIST_HEAD(&glob->swap_lru);
1506 INIT_LIST_HEAD(&glob->device_list);
1508 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1509 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1515 atomic_set(&glob->bo_count, 0);
1517 refcount_init(&glob->kobj_ref, 1);
1521 vm_page_free(glob->dummy_read_page);
1523 free(glob, M_DRM_GLOBAL);
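
File lines 1490 to 1523 are the matching init: the device-list sx lock and the LRU mutex are set up, a dummy read page is allocated, the swap LRU and device list are initialized, and ttm_bo_swapout is registered as the shrink callback with the memory global. A sketch stitching the listed fragments together; the bo_ref recovery, the vm_page_alloc_contig arguments after the first two, the error labels, and the diagnostic message are assumptions:

int
ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
	    container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	sx_init(&glob->device_list_mutex, "ttmdlm");
	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
	glob->mem_glob = bo_ref->mem_glob;

	/* One zero-filled page shared by all unpopulated mappings. */
	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_ZERO,
	    1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
	if (glob->dummy_read_page == NULL) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	/* Let the memory accounting layer trigger buffer-object swapout. */
	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (ret != 0) {
		printf("[TTM] Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);
	refcount_init(&glob->kobj_ref, 1);
	return (0);

out_no_shrink:
	vm_page_free(glob->dummy_read_page);
out_no_drp:
	free(glob, M_DRM_GLOBAL);
	return (ret);
}
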
1532 struct ttm_bo_global *glob = bdev->glob;
1547 sx_xlock(&glob->device_list_mutex);
1549 sx_xunlock(&glob->device_list_mutex);
1557 mtx_lock(&glob->lru_lock);
1563 mtx_unlock(&glob->lru_lock);
1574 struct ttm_bo_global *glob,
1603 bdev->glob = glob;
1607 sx_xlock(&glob->device_list_mutex);
1608 list_add_tail(&bdev->device_list, &glob->device_list);
1609 sx_xunlock(&glob->device_list_mutex);
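
File lines 1574 to 1609 attach each ttm_bo_device to the shared global: ttm_bo_device_init() stores the glob pointer in the device and links the device onto glob->device_list under device_list_mutex. A trimmed sketch of just the glob-related part; the signature and the elided driver setup are assumptions:

int
ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
    struct ttm_bo_driver *driver, uint64_t file_page_offset, bool need_dma32)
{
	/* ... driver hookup, address-space and memory-manager setup elided ... */

	bdev->glob = glob;

	/* Publish the device on the shared list under the global sx lock. */
	sx_xlock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	sx_xunlock(&glob->device_list_mutex);

	return (0);
}
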
1799 struct ttm_bo_global *glob =
1806 mtx_lock(&glob->lru_lock);
1807 list_for_each_entry(bo, &glob->swap_lru, swap) {
1814 mtx_unlock(&glob->lru_lock);
1828 mtx_unlock(&glob->lru_lock);
1885 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
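
File lines 1799 to 1828 are the shrink callback itself: it recovers the global from the embedded shrink object and walks glob->swap_lru under lru_lock to find a victim, and line 1885 drains it at device shutdown. A sketch of the callback's shape and the drain loop; the victim reservation and the actual eviction are elided, and the surrounding error handling is an assumption:

static int
ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;

	/* Pick the oldest swappable object off the global swap LRU. */
	mtx_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		/* ... try to reserve bo; on success set ret = 0 and break ... */
	}
	mtx_unlock(&glob->lru_lock);
	if (ret != 0)
		return (ret);	/* nothing left to swap out */

	/* ... move the reserved bo to system memory and swap its pages out ... */
	return (0);
}

/* Device teardown (file line 1885): loop until the callback reports that
 * the swap LRU is empty. */
while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
	;
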