Results restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/gpu/drm/nouveau/

Lines Matching defs:nvbo

44 struct nouveau_bo *nvbo = gem->driver_private;
45 struct ttm_buffer_object *bo = &nvbo->bo;
47 if (!nvbo)
49 nvbo->gem = NULL;
51 if (unlikely(nvbo->cpu_filp))
54 if (unlikely(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo);
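
Read together, lines 44-56 are the driver's GEM free callback. A minimal reconstruction is sketched below; only the nvbo lines above come from the source, while the function name, the early return, the synccpu release, and the final ttm_bo_unref() are assumptions about the surrounding code.

void
nouveau_gem_object_del(struct drm_gem_object *gem)      /* name assumed */
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;

        if (!nvbo)
                return;
        nvbo->gem = NULL;

        /* a CPU_PREP that was never paired with CPU_FINI still holds
         * the TTM synccpu write grab; release it on the owner's behalf */
        if (unlikely(nvbo->cpu_filp))
                ttm_bo_synccpu_write_release(bo);

        /* collapse any leftover pins to one and drop it */
        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        ttm_bo_unref(&bo);      /* assumed: drop the TTM reference */
}
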
71 struct nouveau_bo *nvbo;
78 nvbo = *pnvbo;
80 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
81 if (!nvbo->gem) {
86 nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
87 nvbo->gem->driver_private = nvbo;
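
Lines 71-87 wrap a freshly created nouveau_bo in a GEM object: the GEM object is sized to match the BO, its shmem file becomes the BO's persistent swap storage, and driver_private points back at the BO. A hedged sketch follows; the parameter list, the nouveau_bo_new() call, and the nouveau_bo_ref(NULL, ...) cleanup are assumptions. (persistant_swap_storage is the field's actual spelling in this kernel's TTM.)

int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
                int size, int align, uint32_t flags, uint32_t tile_mode,
                uint32_t tile_flags, bool no_vm, bool mappable,
                struct nouveau_bo **pnvbo)              /* signature assumed */
{
        struct nouveau_bo *nvbo;
        int ret;

        ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
                             tile_flags, no_vm, mappable, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);            /* assumed cleanup */
                return -ENOMEM;
        }

        /* let TTM swap through the GEM shmem file; link GEM -> BO */
        nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}
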
94 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
96 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
101 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
102 rep->offset = nvbo->bo.offset;
103 rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
104 rep->tile_mode = nvbo->tile_mode;
105 rep->tile_flags = nvbo->tile_flags;
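
Lines 94-105 fill a struct drm_nouveau_gem_info reply from the BO's current TTM state; the branch at line 96 decides which memory domain to report. The NOUVEAU_GEM_DOMAIN_* constants and the VRAM else-arm are assumptions based on the nouveau UAPI, the rest mirrors the matched lines.

static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;  /* assumed constant */
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;  /* assumed constant */

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->offset = nvbo->bo.offset;
        rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}
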
135 struct nouveau_bo *nvbo = NULL;
161 &nvbo);
165 ret = nouveau_gem_info(nvbo->gem, &req->info);
169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
171 drm_gem_object_unreference_unlocked(nvbo->gem);
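
Lines 135-171 are the core of the GEM_NEW ioctl: allocate the buffer (the call whose final &nvbo argument is matched at line 161), fill the reply, publish a handle to userspace, then drop the function-local reference so the handle is the only thing keeping the object alive. A condensed sketch; the exact arguments to nouveau_gem_new() (flags and chan would have been computed earlier in the ioctl) and the error labels are assumptions.

        ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
                              flags, req->info.tile_mode,
                              req->info.tile_flags, false, true,
                              &nvbo);
        if (ret)
                return ret;

        ret = nouveau_gem_info(nvbo->gem, &req->info);
        if (ret)
                goto out;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
        /* the handle (if created) now holds the only reference */
        drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
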
180 struct nouveau_bo *nvbo = gem->driver_private;
181 struct ttm_buffer_object *bo = &nvbo->bo;
209 nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
224 struct nouveau_bo *nvbo;
227 nvbo = list_entry(entry, struct nouveau_bo, entry);
231 spin_lock(&nvbo->bo.lock);
232 prev_fence = nvbo->bo.sync_obj;
233 nvbo->bo.sync_obj = nouveau_fence_ref(fence);
234 spin_unlock(&nvbo->bo.lock);
238 if (unlikely(nvbo->validate_mapped)) {
239 ttm_bo_kunmap(&nvbo->kmap);
240 nvbo->validate_mapped = false;
243 list_del(&nvbo->entry);
244 nvbo->reserved_by = NULL;
245 ttm_bo_unreserve(&nvbo->bo);
246 drm_gem_object_unreference_unlocked(nvbo->gem);
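
Lines 224-246 are the per-list cleanup run after a pushbuf submission: each validated BO gets the new fence installed as its TTM sync object, is unmapped if the relocation code kmapped it, and is then taken off the list, unreserved, and unreferenced. In the sketch below the function name, the list_for_each_safe() framing, the NULL-fence check, and the prev_fence unref are assumed; the loop body follows the matched lines.

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{                                                       /* name assumed */
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);

                if (likely(fence)) {
                        struct nouveau_fence *prev_fence;

                        spin_lock(&nvbo->bo.lock);
                        prev_fence = nvbo->bo.sync_obj;
                        nvbo->bo.sync_obj = nouveau_fence_ref(fence);
                        spin_unlock(&nvbo->bo.lock);
                        nouveau_fence_unref((void **)&prev_fence); /* assumed */
                }

                /* undo any kmap left behind by relocation patching */
                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}
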
279 struct nouveau_bo *nvbo;
287 nvbo = gem->driver_private;
289 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
296 ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
300 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
309 b->user_priv = (uint64_t)(unsigned long)nvbo;
310 nvbo->reserved_by = file_priv;
311 nvbo->pbbo_index = i;
314 list_add_tail(&nvbo->entry, &op->both_list);
317 list_add_tail(&nvbo->entry, &op->vram_list);
320 list_add_tail(&nvbo->entry, &op->gart_list);
324 list_add_tail(&nvbo->entry, &op->both_list);
329 if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
332 if (nvbo->cpu_filp == file_priv) {
334 "to validate it!\n", nvbo);
339 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
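
Lines 279-339 are the reservation pass over the userspace buffer list: each BO is reserved against a per-submit sequence number, tagged with the submitting file and its list index, and sorted onto the VRAM, GART, or "both" list according to the domains userspace allows; contention backs everything out and retries, and a buffer still CPU-prepped by the same client is rejected. Below is a condensed sketch of the loop body only; the gem handle lookup, the retry/out labels, the validate_fini() name, the NV_ERROR() wording, and the NOUVEAU_GEM_DOMAIN_* tests are assumptions.

        nvbo = gem->driver_private;
        if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                /* the same handle appears twice in one submission */
                ret = -EINVAL;
                goto out;
        }

        ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
        if (ret) {
                /* contended: release everything reserved so far, wait
                 * for the other holder, then retry the whole pass */
                validate_fini(op, NULL);                /* name assumed */
                ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
                if (ret)
                        return ret;
                goto retry;
        }

        b->user_priv = (uint64_t)(unsigned long)nvbo;
        nvbo->reserved_by = file_priv;
        nvbo->pbbo_index = i;

        /* sort by the placements userspace says are acceptable */
        if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                list_add_tail(&nvbo->entry, &op->both_list);
        else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                list_add_tail(&nvbo->entry, &op->vram_list);
        else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                list_add_tail(&nvbo->entry, &op->gart_list);
        else
                list_add_tail(&nvbo->entry, &op->both_list);

        /* a buffer the caller still has CPU-prepped cannot be validated */
        if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
                if (nvbo->cpu_filp == file_priv) {
                        NV_ERROR(dev, "bo %p mapped by process trying "
                                      "to validate it!\n", nvbo);
                        ret = -EINVAL;
                        goto out;
                }
                ret = ttm_bo_wait_cpu(&nvbo->bo, false);
        }
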
359 struct nouveau_bo *nvbo;
362 list_for_each_entry(nvbo, list, entry) {
363 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
365 ret = nouveau_bo_sync_gpu(nvbo, chan);
371 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
379 nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
380 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
382 nvbo->channel = NULL;
388 ret = nouveau_bo_sync_gpu(nvbo, chan);
394 if (nvbo->bo.offset == b->presumed.offset &&
395 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
397 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
401 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
405 b->presumed.offset = nvbo->bo.offset;
409 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
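
Lines 359-409 are the validation loop proper: each reserved buffer is synced against the target channel, its placement is set from the read/write domains, ttm_bo_validate() moves it if needed, and the resulting offset is compared with the offset userspace presumed; on mismatch the presumed location is rewritten and copied back so later submissions can skip relocations. A condensed sketch of the loop body; the trailing ttm_bo_validate() arguments, the meaning of bit 31 of read_domains, the NOUVEAU_GEM_DOMAIN_* constants, and the error handling are assumptions.

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_bo_sync_gpu(nvbo, chan);
                if (unlikely(ret))
                        return ret;

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret))
                        return ret;

                /* bit 31 of read_domains appears to opt out of implicit
                 * sync against this channel (assumed) */
                nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                      false, false, false);
                nvbo->channel = NULL;
                if (unlikely(ret))
                        return ret;

                ret = nouveau_bo_sync_gpu(nvbo, chan);
                if (unlikely(ret))
                        return ret;

                /* nothing to report if userspace's guess was right */
                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                        continue;

                if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                else
                        b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                b->presumed.offset = nvbo->bo.offset;

                if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                     &b->presumed, sizeof(b->presumed)))
                        return -EFAULT;
        }
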
502 struct nouveau_bo *nvbo;
520 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
523 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
529 if (!nvbo->kmap.virtual) {
530 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
531 &nvbo->kmap);
536 nvbo->validate_mapped = true;
554 spin_lock(&nvbo->bo.lock);
555 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
556 spin_unlock(&nvbo->bo.lock);
562 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
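
Lines 502-562 apply userspace relocations: the target BO is recovered from the user_priv pointer stashed during reservation, the write offset is bounds-checked against the buffer size, the BO is kmapped on demand (and flagged validate_mapped so validate_fini can undo the map), waited on until idle, and finally patched with nouveau_bo_wr32(). A condensed sketch of the per-relocation body; the "+ 4" in the bounds check, the derivation of data from the relocation flags, and the error handling are assumptions.

        nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

        /* the 32-bit write must land inside the buffer */
        if (unlikely(r->reloc_bo_offset + 4 >
                     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                ret = -EINVAL;
                break;
        }

        /* map the whole BO once and remember that we did */
        if (!nvbo->kmap.virtual) {
                ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                  &nvbo->kmap);
                if (ret)
                        break;
                nvbo->validate_mapped = true;
        }

        data = 0;       /* the real value is derived from r->flags and the
                         * target buffer's offset (elided here) */

        /* let the GPU finish with the buffer before the CPU writes to it */
        spin_lock(&nvbo->bo.lock);
        ret = ttm_bo_wait(&nvbo->bo, false, false, false);
        spin_unlock(&nvbo->bo.lock);
        if (ret)
                break;

        nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
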
657 struct nouveau_bo *nvbo = (void *)(unsigned long)
660 nv50_dma_push(chan, nvbo, push[i].offset,
672 struct nouveau_bo *nvbo = (void *)(unsigned long)
674 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
688 struct nouveau_bo *nvbo = (void *)(unsigned long)
690 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
696 if (!nvbo->kmap.virtual) {
697 ret = ttm_bo_kmap(&nvbo->bo, 0,
698 nvbo->bo.mem.
700 &nvbo->kmap);
705 nvbo->validate_mapped = true;
708 nouveau_bo_wr32(nvbo, (push[i].offset +
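
Lines 657-708 are the three submission paths, all recovering the push buffer's nouveau_bo from the user_priv value stored at reservation time: on newer hardware the buffer is handed to nv50_dma_push() directly (line 660); otherwise the kernel either jumps to the buffer through its drm_mm_node offset (lines 672-674) or, as a final fallback, kmaps it and patches it in place with nouveau_bo_wr32() (lines 688-708). A sketch of only the first branch; the card-type test, the req->nr_push bound, and the bo_index/length field names are assumptions.

        if (dev_priv->card_type >= NV_50) {             /* condition assumed */
                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        }
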
751 domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
769 struct nouveau_bo *nvbo;
776 nvbo = nouveau_gem_object(gem);
778 if (nvbo->cpu_filp) {
779 if (nvbo->cpu_filp == file_priv)
782 ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
788 spin_lock(&nvbo->bo.lock);
789 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
790 spin_unlock(&nvbo->bo.lock);
792 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
794 nvbo->cpu_filp = file_priv;
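
Lines 769-794 are the CPU_PREP ioctl: if another client already has the buffer CPU-prepped, the caller waits on (or bounces off) that grab; otherwise the ioctl either just waits for the GPU or takes the TTM synccpu write grab and records file_priv in cpu_filp so CPU_FINI and the free path can release it. A hedged sketch; the same-client early exit, the "noblock" flag name, and the gem lookup/unreference framing are assumptions.

        nvbo = nouveau_gem_object(gem);

        /* someone already has this buffer CPU-prepped */
        if (nvbo->cpu_filp) {
                if (nvbo->cpu_filp == file_priv)
                        goto out;               /* same client (assumed) */

                ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
                if (ret)
                        goto out;
        }

        if (noblock) {                          /* flag name assumed */
                /* only wait for the GPU, don't take the write grab */
                spin_lock(&nvbo->bo.lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
                spin_unlock(&nvbo->bo.lock);
        } else {
                /* grab the synccpu write lock and remember who owns it */
                ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
                if (ret == 0)
                        nvbo->cpu_filp = file_priv;
        }
out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
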
808 struct nouveau_bo *nvbo;
814 nvbo = nouveau_gem_object(gem);
816 if (nvbo->cpu_filp != file_priv)
818 nvbo->cpu_filp = NULL;
820 ttm_bo_synccpu_write_release(&nvbo->bo);
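
Lines 808-820 are the matching CPU_FINI ioctl: only the client recorded in cpu_filp may finish the CPU access, after which the field is cleared and the synccpu write grab is released. Sketch below; the gem lookup, the behaviour for a mismatched client, and the unreference on exit are assumptions.

        nvbo = nouveau_gem_object(gem);

        if (nvbo->cpu_filp != file_priv)
                goto out;               /* not the prepping client (assumed) */
        nvbo->cpu_filp = NULL;

        ttm_bo_synccpu_write_release(&nvbo->bo);
        ret = 0;
out:
        drm_gem_object_unreference_unlocked(gem);
        return ret;
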