/*	$OpenBSD: ttm_bo_util.c,v 1.4 2014/02/10 02:15:25 jsg Exp $	*/
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/refcount.h>

int	 ttm_mem_reg_ioremap(struct ttm_bo_device *, struct ttm_mem_reg *,
	     void **);
void	 ttm_mem_reg_iounmap(struct ttm_bo_device *, struct ttm_mem_reg *,
	     void *);

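/*
 * The following are local stand-ins for the Linux kmap/kunmap and
 * vmap/vunmap kernel-mapping primitives; the OpenBSD implementations
 * appear further down in this file and cover only what the TTM code
 * here needs from them.
 */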
void	*kmap(struct vm_page *);
void	 kunmap(void *addr);
void	*vmap(struct vm_page **, unsigned int, unsigned long, pgprot_t);
void	 vunmap(void *, size_t);

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

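/*
 * Move a buffer through its TTM: unbind it from the old memory region,
 * switch the pages to the caching mode of the new placement and, unless
 * the destination is system memory, rebind the TTM there.  No data is
 * copied; the backing pages travel with the TTM.
 */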
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

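/*
 * ttm_mem_io_lock()/ttm_mem_io_unlock() serialize io_mem_reserve and
 * io_mem_free calls against eviction of buffer mappings.  Memory types
 * with io_reserve_fastpath set never take the rwlock at all.
 */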
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return rw_enter(&man->io_reserve_rwlock, RW_WRITE | RW_INTR);

	rw_enter_write(&man->io_reserve_rwlock);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	rw_exit_write(&man->io_reserve_rwlock);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

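/*
 * Reserve aperture space for @mem via the driver's io_mem_reserve hook.
 * If the aperture is exhausted (-EAGAIN), evict the least recently used
 * mapping from the io_reserve LRU and retry until the reservation
 * succeeds or the LRU runs out of candidates.
 */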
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

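/*
 * Map the I/O space behind @mem into kernel virtual address space,
 * using bus_space_map(9) where the Linux original would use ioremap.
 * On success *virtual points at the mapping (or at a driver-premapped
 * mem->bus.addr); for regions that are not iomem, *virtual stays NULL
 * and 0 is returned.
 */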
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;
	int flags;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
		    mem->bus.size, BUS_SPACE_MAP_LINEAR | flags, &mem->bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			return -ENOMEM;
		}

		addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);

		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		bus_space_unmap(bdev->memt, mem->bus.bsh, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

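/*
 * Page-at-a-time copy helpers for ttm_bo_move_memcpy() below, covering
 * the io->io, io->ttm and ttm->io cases.  The TTM-side page is mapped
 * with kmap() when the default kernel protection is compatible and
 * with vmap() when a specific (e.g. uncached) protection is needed.
 */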
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
#ifdef notyet
		iowrite32(ioread32(srcP++), dstP++);
#else
		*dstP++ = *srcP++;
#endif
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst, PAGE_SIZE);
	else
		kunmap(d);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct vm_page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
	if (!src)
		return -ENOMEM;

	/* Plain memcpy stands in for the Linux memcpy_toio here. */
#define memcpy_toio(d, s, n) memcpy(d, s, n)
	memcpy_toio(dst, src, PAGE_SIZE);

	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src, PAGE_SIZE);
	else
		kunmap(s);

	return 0;
}

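/*
 * Fallback move path: map both the old and the new region and copy the
 * contents page by page.  When both regions live in the same memory
 * type and may overlap, the loop runs backwards (dir == -1, starting
 * from the last page) so that, as with memmove(3), no page is
 * overwritten before it has been read.
 */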
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM might be null for moves within the same region. */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/* if we fail here don't nuke the mm node
			 * as the bo still owns it */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* failing here means keep the old copy as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	DRM_MEMORYBARRIER();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

#ifdef notyet
	init_waitqueue_head(&fbo->event_queue);
#endif
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	mtx_enter(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	mtx_leave(&bdev->fence_lock);
	refcount_init(&fbo->list_kref, 1);
	refcount_init(&fbo->kref, 1);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef PMAP_WC
	if (caching_flags & TTM_PL_FLAG_WC)
		return PMAP_WC;
	else
#endif
		return PMAP_NOCACHE;
}
EXPORT_SYMBOL(ttm_io_prot);

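/*
 * Kernel-map the I/O region behind a buffer object for ttm_bo_kmap().
 * A driver-premapped bus address is used directly when present;
 * otherwise the range is mapped with bus_space_map(9), prefetchable
 * for write-combined placements.
 */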
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int flags;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			flags = BUS_SPACE_MAP_PREFETCHABLE;
		else
			flags = 0;

		if (bus_space_map(bo->bdev->memt,
		    mem->bus.base + bo->mem.bus.offset + offset,
		    size, BUS_SPACE_MAP_LINEAR | flags,
		    &bo->mem.bus.bsh)) {
			printf("%s bus_space_map failed\n", __func__);
			map->virtual = NULL;
		} else
			map->virtual = bus_space_vaddr(bo->bdev->memt,
			    bo->mem.bus.bsh);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

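/*
 * OpenBSD implementations of the Linux mapping primitives declared at
 * the top of this file.  kmap() uses the pmap direct map where the
 * architecture provides one and otherwise borrows a page of KVA;
 * vmap() always allocates KVA and enters wired mappings with the
 * caller's protection flags.
 */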
void *
kmap(struct vm_page *pg)
{
	vaddr_t va;

#if defined (__HAVE_PMAP_DIRECT)
	va = pmap_map_direct(pg);
#else
	va = uvm_km_valloc(kernel_map, PAGE_SIZE);
	if (va == 0)
		return (NULL);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
	pmap_update(pmap_kernel());
#endif
	return (void *)va;
}

void
kunmap(void *addr)
{
	vaddr_t va = (vaddr_t)addr;

#if defined (__HAVE_PMAP_DIRECT)
	pmap_unmap_direct(va);
#else
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, PAGE_SIZE);
#endif
}

void *
vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
     pgprot_t prot)
{
	vaddr_t va;
	paddr_t pa;
	int i;

	va = uvm_km_valloc(kernel_map, PAGE_SIZE * npages);
	if (va == 0)
		return NULL;
	for (i = 0; i < npages; i++) {
		/* the pgprot flags are OR'ed into the pa for pmap_enter(9) */
		pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
		pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		pmap_update(pmap_kernel());
	}

	return (void *)va;
}

void
vunmap(void *addr, size_t size)
{
	vaddr_t va = (vaddr_t)addr;

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size);
}

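/*
 * Kernel-map a page range out of a TTM, populating it first if needed.
 * A single cached page can go through kmap(); anything that must look
 * contiguous or needs non-default caching goes through vmap().
 */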
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
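
/*
 * A minimal usage sketch for the kmap interface; the buffer object is
 * hypothetical and assumed to be reserved by the caller:
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.virtual, 0, bo->num_pages << PAGE_SHIFT);
 *	ttm_bo_kunmap(&map);
 *
 * map.virtual is only valid until ttm_bo_kunmap() is called.
 */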

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		bus_space_unmap(bo->bdev->memt, bo->mem.bus.bsh,
		    bo->mem.bus.size);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual, bo->mem.bus.size);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->virtual);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

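/*
 * Finish an accelerated (GPU-copied) move once the copy fence has been
 * attached to the buffer.  On eviction, wait for the copy to complete
 * before the old node may be reused; otherwise hand the old memory to
 * a throwaway "ghost" buffer object that releases it, and possibly the
 * TTM, once the GPU is done.
 */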
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	mtx_enter(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_leave(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		mtx_leave(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
762