Lines Matching refs:drm

60 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
63 typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
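
These matches appear to come from the nouveau driver's device-private memory (DMEM) code, drivers/gpu/drm/nouveau/nouveau_dmem.c. The refs search only captures the first line of each typedef at lines 60 and 63; the remaining parameters sit on continuation lines that do not mention drm. Reconstructed from the call sites at lines 152, 637 and 642 below (parameter names are guesses), the two hooks and the structure that holds them (implied by the assignments at lines 579-581) look roughly like:

/* Sketch: copy/clear hooks dispatched through drm->dmem->migrate.
 * Parameter names are assumptions; only the leading arguments are
 * visible in the matched lines. */
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper dst_aper, u64 dst_addr,
				      enum nouveau_aper src_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper dst_aper, u64 dst_addr);

struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	nouveau_clear_page_t clear_func;
	struct nouveau_channel *chan;
};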
69 struct nouveau_drm *drm;
81 struct nouveau_drm *drm;
98 return chunk->drm;
113 struct nouveau_dmem *dmem = chunk->drm->dmem;
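
Lines 69 and 81 are the back-pointers from the chunk structure and from struct nouveau_dmem to the owning nouveau_drm; line 98 is the tail of the helper that recovers that pointer from a device-private struct page, and line 113 uses it to reach drm->dmem when a page is freed. A minimal sketch of that helper, assuming the chunk embeds the dev_pagemap behind its pages (consistent with line 256); the nouveau_page_to_chunk name is an assumption:

/* Sketch: a device-private page's pgmap lives inside its owning chunk,
 * so container_of() recovers the chunk, which carries the drm pointer. */
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
	return nouveau_page_to_chunk(page)->drm;	/* line 98 */
}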
141 static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
144 struct device *dev = drm->dev->dev;
152 if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
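
Lines 141-152 belong to nouveau_dmem_copy_one(), which moves a single VRAM page into a host page over the copy engine. A sketch of the flow those lines imply; the error-handling details and the nouveau_dmem_page_addr() helper used for the VRAM offset are assumptions:

static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
				 struct page *dpage, dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;		/* line 144 */

	/* Map the destination system page so the copy engine can write it. */
	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		return -EIO;

	/* One page, VRAM source -> host destination (line 152). */
	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
					 NOUVEAU_APER_VRAM,
					 nouveau_dmem_page_addr(spage))) {
		dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		return -EIO;
	}
	return 0;
}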
163 struct nouveau_drm *drm = page_to_drm(vmf->page);
164 struct nouveau_dmem *dmem = drm->dmem;
177 .pgmap_owner = drm->dev,
205 ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
215 dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
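
Lines 163-215 are from the CPU-fault path (migrate_to_ram): a device-private page was touched by the CPU, so its contents are copied back to a freshly allocated system page and the migration is finalized. A condensed, hedged outline; fields and helpers outside the matched lines (the destination-page allocation, nouveau_dmem_fence_done()) are assumptions:

static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_drm *drm = page_to_drm(vmf->page);	/* line 163 */
	struct nouveau_dmem *dmem = drm->dmem;			/* line 164 */
	struct nouveau_fence *fence = NULL;
	struct page *spage, *dpage;
	unsigned long src = 0, dst = 0;
	dma_addr_t dma_addr = 0;
	vm_fault_t ret = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= drm->dev,			/* line 177 */
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args) || !args.cpages)
		return VM_FAULT_SIGBUS;

	spage = migrate_pfn_to_page(src);
	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
		goto done;

	/* The real code allocates the destination relative to the faulting
	 * VMA; the plain alloc_page() here is a stand-in. */
	dpage = alloc_page(GFP_HIGHUSER);
	if (!dpage)
		goto done;
	dst = migrate_pfn(page_to_pfn(dpage));

	if (nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr)) {	/* line 205 */
		ret = VM_FAULT_SIGBUS;
		goto done;
	}

	nouveau_fence_new(&fence, dmem->migrate.chan);
	migrate_vma_pages(&args);
	nouveau_dmem_fence_done(&fence);	/* assumed wait-and-unref helper */
	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);	/* line 215 */
done:
	migrate_vma_finalize(&args);
	return ret;
}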
227 nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
250 chunk->drm = drm;
256 chunk->pagemap.owner = drm->dev;
258 ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
274 mutex_lock(&drm->dmem->mutex);
275 list_add(&chunk->list, &drm->dmem->chunks);
276 mutex_unlock(&drm->dmem->mutex);
280 spin_lock(&drm->dmem->lock);
282 page->zone_device_data = drm->dmem->free_pages;
283 drm->dmem->free_pages = page;
287 spin_unlock(&drm->dmem->lock);
289 NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
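
Lines 227-289 come from nouveau_dmem_chunk_alloc(): it backs a new DMEM chunk with a VRAM buffer object (line 258), registers the chunk's dev_pagemap with drm->dev as owner (line 256), links the chunk into drm->dmem->chunks under the mutex (lines 274-276), and finally threads the chunk's struct pages onto the free_pages list under the spinlock (lines 280-287) before logging the registered size (line 289). A sketch of that final step; DMEM_CHUNK_NPAGES and the use of the pagemap range for the first pfn are assumptions:

	/* Sketch of lines 280-287: device-private pages reuse zone_device_data
	 * as a singly linked "next" pointer.  All but the last page go onto
	 * the free list; the final page is handed straight back to the caller
	 * whose allocation triggered the chunk creation. */
	page = pfn_to_page(chunk->pagemap.range.start >> PAGE_SHIFT);
	spin_lock(&drm->dmem->lock);
	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
		page->zone_device_data = drm->dmem->free_pages;	/* line 282 */
		drm->dmem->free_pages = page;			/* line 283 */
	}
	*ppage = page;
	spin_unlock(&drm->dmem->lock);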
307 nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
313 spin_lock(&drm->dmem->lock);
314 if (drm->dmem->free_pages) {
315 page = drm->dmem->free_pages;
316 drm->dmem->free_pages = page->zone_device_data;
319 spin_unlock(&drm->dmem->lock);
321 spin_unlock(&drm->dmem->lock);
322 ret = nouveau_dmem_chunk_alloc(drm, &page);
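
Lines 307-329 show the allocation fast/slow path in nouveau_dmem_page_alloc_locked(): pop a page off the free list under the spinlock when one is available, otherwise drop the lock and grow the pool by allocating a new chunk. A condensed sketch; the reference/lock handling on the returned page is omitted:

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct page *page = NULL;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		/* Fast path: pop the head of the singly linked free list. */
		page = drm->dmem->free_pages;			/* line 315 */
		drm->dmem->free_pages = page->zone_device_data;	/* line 316 */
		spin_unlock(&drm->dmem->lock);
	} else {
		/* Slow path: no free device pages left, so allocate a whole
		 * new chunk, which hands back one page directly (line 322). */
		spin_unlock(&drm->dmem->lock);
		if (nouveau_dmem_chunk_alloc(drm, &page))
			return NULL;
	}
	return page;
}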
332 nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
339 nouveau_dmem_resume(struct nouveau_drm *drm)
344 if (drm->dmem == NULL)
347 mutex_lock(&drm->dmem->mutex);
348 list_for_each_entry(chunk, &drm->dmem->chunks, list) {
353 mutex_unlock(&drm->dmem->mutex);
357 nouveau_dmem_suspend(struct nouveau_drm *drm)
361 if (drm->dmem == NULL)
364 mutex_lock(&drm->dmem->mutex);
365 list_for_each_entry(chunk, &drm->dmem->chunks, list)
367 mutex_unlock(&drm->dmem->mutex);
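
Lines 339-367 cover resume and suspend: both bail out when DMEM was never set up and otherwise walk drm->dmem->chunks under the mutex. The loop bodies are not part of the match; a plausible sketch, assuming suspend unpins each chunk's backing VRAM buffer object and resume re-pins it (chunk->bo and the nouveau_bo_pin/unpin calls are assumptions):

int
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)				/* line 344 */
		return 0;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		/* Assumption: put the chunk's backing BO back into VRAM. */
		WARN_ON(nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false));
	}
	mutex_unlock(&drm->dmem->mutex);
	return 0;
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)				/* line 361 */
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)
		nouveau_bo_unpin(chunk->bo);		/* assumption */
	mutex_unlock(&drm->dmem->mutex);
}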
399 nouveau_dmem_copy_one(chunk->drm,
405 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
412 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
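
Lines 399-412 are from the chunk-eviction path used at teardown: every migratable device page in the chunk is copied back to a newly allocated system page, one fence on the migration channel covers the whole batch, and the temporary DMA mappings are then torn down. A compressed sketch of that loop; the surrounding array allocation and the migrate_device_* bookkeeping are assumptions:

	/* For each evicted device page: allocate a host page and queue a
	 * VRAM -> host copy; dma_addrs[] remembers mappings to undo later. */
	for (i = 0; i < npages; i++) {
		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
			struct page *dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);

			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
			nouveau_dmem_copy_one(chunk->drm,		/* line 399 */
					      migrate_pfn_to_page(src_pfns[i]),
					      dpage, &dma_addrs[i]);
		}
	}

	/* One fence for the batch (line 405), commit the migration, then
	 * unmap the per-page DMA addresses (line 412). */
	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
	migrate_device_pages(src_pfns, dst_pfns, npages);
	nouveau_dmem_fence_done(&fence);	/* assumed wait-and-unref helper */
	migrate_device_finalize(src_pfns, dst_pfns, npages);

	for (i = 0; i < npages; i++)
		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i],
			       PAGE_SIZE, DMA_BIDIRECTIONAL);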
417 nouveau_dmem_fini(struct nouveau_drm *drm)
421 if (drm->dmem == NULL)
424 mutex_lock(&drm->dmem->mutex);
426 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
438 mutex_unlock(&drm->dmem->mutex);
442 nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
446 struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
516 nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
519 struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
572 nouveau_dmem_migrate_init(struct nouveau_drm *drm)
574 switch (drm->ttm.copy.oclass) {
579 drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
580 drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
581 drm->dmem->migrate.chan = drm->ttm.chan;
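
Lines 572-581: the migration backend is chosen from the class of the already-created TTM copy channel, and that channel is reused for DMEM migration. The case labels are not part of the match; assuming the Pascal/Volta/Turing DMA copy classes are the ones accepted, the shape is:

static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {			/* line 574 */
	case PASCAL_DMA_COPY_A:		/* case labels are assumptions */
	case PASCAL_DMA_COPY_B:
	case VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;	/* line 579 */
		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;	/* line 580 */
		drm->dmem->migrate.chan = drm->ttm.chan;		/* line 581 */
		return 0;
	default:
		break;
	}
	return -ENODEV;
}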
590 nouveau_dmem_init(struct nouveau_drm *drm)
595 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
598 if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
601 drm->dmem->drm = drm;
602 mutex_init(&drm->dmem->mutex);
603 INIT_LIST_HEAD(&drm->dmem->chunks);
604 mutex_init(&drm->dmem->mutex);
605 spin_lock_init(&drm->dmem->lock);
608 ret = nouveau_dmem_migrate_init(drm);
610 kfree(drm->dmem);
611 drm->dmem = NULL;
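
Lines 590-611 reconstruct almost all of nouveau_dmem_init(): DMEM is only set up on Pascal or newer GPUs, the nouveau_dmem structure is allocated and its mutex, chunk list and page-pool spinlock are initialized, and the migration backend is probed; on failure the structure is freed and the pointer cleared. Note that the listing shows mutex_init(&drm->dmem->mutex) twice (source lines 602 and 604), which looks redundant but is reproduced here as matched. A sketch assembled from those lines (return values are assumptions):

int
nouveau_dmem_init(struct nouveau_drm *drm)
{
	int ret;

	/* Device-private memory only makes sense on Pascal or newer. */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)	/* line 595 */
		return 0;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))	/* line 598 */
		return -ENOMEM;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunks);
	mutex_init(&drm->dmem->mutex);	/* duplicated in the listing (line 604) */
	spin_lock_init(&drm->dmem->lock);

	/* Set up the copy-engine backend used for page migration. */
	ret = nouveau_dmem_migrate_init(drm);			/* line 608 */
	if (ret) {
		kfree(drm->dmem);				/* line 610 */
		drm->dmem = NULL;				/* line 611 */
	}
	return ret;
}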
615 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
619 struct device *dev = drm->dev->dev;
627 dpage = nouveau_dmem_page_alloc_locked(drm);
637 if (drm->dmem->migrate.copy_func(drm, 1,
642 if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
657 nouveau_dmem_page_free_locked(drm, dpage);
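
Lines 615-657 are the host-to-VRAM direction: for each source entry a device page is taken from the pool, then either the existing system page is DMA-mapped and copied into VRAM with copy_func, or, when there is no source page to preserve, the VRAM page is simply cleared with clear_func. The real function also takes the svmm and a pfn out-parameter used for the SVM fault reply (lines 615 and 671); this hedged sketch drops them, and the error labels and VRAM-address helper are assumptions:

static unsigned long
nouveau_dmem_migrate_copy_one_sketch(struct nouveau_drm *drm,
				     unsigned long src, dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;		/* line 619 */
	struct page *spage = migrate_pfn_to_page(src);
	struct page *dpage;
	unsigned long paddr;

	if (!(src & MIGRATE_PFN_MIGRATE))
		goto out;

	dpage = nouveau_dmem_page_alloc_locked(drm);	/* line 627 */
	if (!dpage)
		goto out;

	paddr = nouveau_dmem_page_addr(dpage);	/* VRAM offset; helper name assumed */
	if (spage) {
		/* Copy the existing system page into the new VRAM page. */
		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, 1,		/* line 637 */
				NOUVEAU_APER_VRAM, paddr,
				NOUVEAU_APER_HOST, *dma_addr))
			goto out_dma_unmap;
	} else {
		/* Nothing to copy: just clear the VRAM page. */
		*dma_addr = DMA_MAPPING_ERROR;
		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),	/* line 642 */
				NOUVEAU_APER_VRAM, paddr))
			goto out_free_page;
	}

	return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);	/* line 657 */
out:
	return 0;
}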
663 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
671 args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
673 if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
678 nouveau_fence_new(&fence, drm->dmem->migrate.chan);
684 dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
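
Lines 663-684 are nouveau_dmem_migrate_chunk(), which processes one window of the range: it fills args->dst by migrating page after page, only advancing the dma_addrs cursor when a mapping was actually created, then issues a single fence for the window, commits the migration and unmaps the temporary DMA addresses. A sketch of the body; the trailing arguments of the per-page call and the finalize step are assumptions:

	unsigned long addr = args->start, nr_dma = 0, i;

	for (i = 0; addr < args->end; i++, addr += PAGE_SIZE) {
		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,	/* line 671 */
				args->src[i], dma_addrs + nr_dma, pfns + i);
		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))	/* line 673 */
			nr_dma++;
	}

	/* One fence covers the whole window (line 678). */
	nouveau_fence_new(&fence, drm->dmem->migrate.chan);
	migrate_vma_pages(args);
	nouveau_dmem_fence_done(&fence);	/* assumed wait-and-unref helper */

	while (nr_dma--)
		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma],	/* line 684 */
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
	migrate_vma_finalize(args);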
691 nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
703 .pgmap_owner = drm->dev,
710 if (drm->dmem == NULL)
739 nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
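
Lines 691-739 are nouveau_dmem_migrate_vma(), the entry point that pulls a range of anonymous system memory into VRAM: it refuses to run when DMEM was never initialized (line 710), sets up a struct migrate_vma that selects system pages with drm->dev as pgmap_owner (line 703), and then walks the range in fixed-size windows, handing each populated window to nouveau_dmem_migrate_chunk() (line 739). A sketch of the setup and the windowed loop; the window size and the allocation of args.src/args.dst/dma_addrs/pfns are assumptions:

	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.pgmap_owner	= drm->dev,		/* line 703 */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long addr, window = SG_MAX_SINGLE_ALLOC << PAGE_SHIFT; /* assumed */

	if (drm->dmem == NULL)				/* line 710 */
		return -ENODEV;

	for (addr = start; addr < end; addr = args.end) {
		args.start = addr;
		args.end = min(end, addr + window);

		if (migrate_vma_setup(&args))
			break;

		/* Only populated windows are migrated; the per-window fence,
		 * DMA unmap and migrate_vma_finalize() happen inside
		 * nouveau_dmem_migrate_chunk() (see the sketch above). */
		if (args.cpages)
			nouveau_dmem_migrate_chunk(drm, svmm, &args,
						   dma_addrs, pfns);	/* line 739 */
	}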