/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
30247835Skib
31247835Skib#include <sys/cdefs.h>
32247835Skib__FBSDID("$FreeBSD$");
33247835Skib
34247835Skib#include <dev/drm2/drmP.h>
35247835Skib#include <dev/drm2/ttm/ttm_module.h>
36247835Skib#include <dev/drm2/ttm/ttm_bo_driver.h>
37247835Skib#include <dev/drm2/ttm/ttm_placement.h>
38247835Skib
39247835Skib#define TTM_ASSERT_LOCKED(param)
40247835Skib#define TTM_DEBUG(fmt, arg...)
41247835Skib#define TTM_BO_HASH_ORDER 13
42247835Skib
43247835Skibstatic int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
44247835Skibstatic int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
45247835Skibstatic void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
46247835Skib
47247835SkibMALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
48247835Skib
49247835Skibstatic inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
50247835Skib{
51247835Skib	int i;
52247835Skib
53247835Skib	for (i = 0; i <= TTM_PL_PRIV5; i++)
54247835Skib		if (flags & (1 << i)) {
55247835Skib			*mem_type = i;
56247835Skib			return 0;
57247835Skib		}
58247835Skib	return -EINVAL;
59247835Skib}
60247835Skib
61247835Skibstatic void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
62247835Skib{
63247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
64247835Skib
65247835Skib	printf("    has_type: %d\n", man->has_type);
66247835Skib	printf("    use_type: %d\n", man->use_type);
67247835Skib	printf("    flags: 0x%08X\n", man->flags);
68247835Skib	printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
69247835Skib	printf("    size: %ju\n", (uintmax_t)man->size);
70247835Skib	printf("    available_caching: 0x%08X\n", man->available_caching);
71247835Skib	printf("    default_caching: 0x%08X\n", man->default_caching);
72247835Skib	if (mem_type != TTM_PL_SYSTEM)
73247835Skib		(*man->func->debug)(man, TTM_PFX);
74247835Skib}
75247835Skib
76247835Skibstatic void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
77247835Skib					struct ttm_placement *placement)
78247835Skib{
79247835Skib	int i, ret, mem_type;
80247835Skib
81247835Skib	printf("No space for %p (%lu pages, %luK, %luM)\n",
82247835Skib	       bo, bo->mem.num_pages, bo->mem.size >> 10,
83247835Skib	       bo->mem.size >> 20);
84247835Skib	for (i = 0; i < placement->num_placement; i++) {
85247835Skib		ret = ttm_mem_type_from_flags(placement->placement[i],
86247835Skib						&mem_type);
87247835Skib		if (ret)
88247835Skib			return;
89247835Skib		printf("  placement[%d]=0x%08X (%d)\n",
90247835Skib		       i, placement->placement[i], mem_type);
91247835Skib		ttm_mem_type_debug(bo->bdev, mem_type);
92247835Skib	}
93247835Skib}
94247835Skib
95247835Skib#if 0
96247835Skibstatic ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
97247835Skib    char *buffer)
98247835Skib{
99247835Skib
100247835Skib	return snprintf(buffer, PAGE_SIZE, "%lu\n",
101247835Skib			(unsigned long) atomic_read(&glob->bo_count));
102247835Skib}
103247835Skib#endif
104247835Skib
105247835Skibstatic inline uint32_t ttm_bo_type_flags(unsigned type)
106247835Skib{
107247835Skib	return 1 << (type);
108247835Skib}
109247835Skib
110247835Skibstatic void ttm_bo_release_list(struct ttm_buffer_object *bo)
111247835Skib{
112247835Skib	struct ttm_bo_device *bdev = bo->bdev;
113247835Skib	size_t acc_size = bo->acc_size;
114247835Skib
115247835Skib	MPASS(atomic_read(&bo->list_kref) == 0);
116247835Skib	MPASS(atomic_read(&bo->kref) == 0);
117247835Skib	MPASS(atomic_read(&bo->cpu_writers) == 0);
118247835Skib	MPASS(bo->sync_obj == NULL);
119247835Skib	MPASS(bo->mem.mm_node == NULL);
120247835Skib	MPASS(list_empty(&bo->lru));
121247835Skib	MPASS(list_empty(&bo->ddestroy));
122247835Skib
123247835Skib	if (bo->ttm)
124247835Skib		ttm_tt_destroy(bo->ttm);
125247835Skib	atomic_dec(&bo->glob->bo_count);
126247835Skib	if (bo->destroy)
127247835Skib		bo->destroy(bo);
128247835Skib	else {
129247835Skib		free(bo, M_TTM_BO);
130247835Skib	}
131247835Skib	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
132247835Skib}
133247835Skib
134254865Sdumbbellstatic int
135247835Skibttm_bo_wait_unreserved_locked(struct ttm_buffer_object *bo, bool interruptible)
136247835Skib{
137247835Skib	const char *wmsg;
138247835Skib	int flags, ret;
139247835Skib
140247835Skib	ret = 0;
141247835Skib	if (interruptible) {
142247835Skib		flags = PCATCH;
143247835Skib		wmsg = "ttbowi";
144247835Skib	} else {
145247835Skib		flags = 0;
146247835Skib		wmsg = "ttbowu";
147247835Skib	}
148254878Sdumbbell	while (ttm_bo_is_reserved(bo)) {
149247835Skib		ret = -msleep(bo, &bo->glob->lru_lock, flags, wmsg, 0);
150259754Sdumbbell		if (ret == -EINTR)
151259754Sdumbbell			ret = -ERESTARTSYS;
152247835Skib		if (ret != 0)
153247835Skib			break;
154247835Skib	}
155247835Skib	return (ret);
156247835Skib}
157247835Skib
158247835Skibvoid ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
159247835Skib{
160247835Skib	struct ttm_bo_device *bdev = bo->bdev;
161247835Skib	struct ttm_mem_type_manager *man;
162247835Skib
163247835Skib	MPASS(ttm_bo_is_reserved(bo));
164247835Skib
165247835Skib	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
166247835Skib
167247835Skib		MPASS(list_empty(&bo->lru));
168247835Skib
169247835Skib		man = &bdev->man[bo->mem.mem_type];
170247835Skib		list_add_tail(&bo->lru, &man->lru);
171247835Skib		refcount_acquire(&bo->list_kref);
172247835Skib
173247835Skib		if (bo->ttm != NULL) {
174247835Skib			list_add_tail(&bo->swap, &bo->glob->swap_lru);
175247835Skib			refcount_acquire(&bo->list_kref);
176247835Skib		}
177247835Skib	}
178247835Skib}
179247835Skib
180247835Skibint ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
181247835Skib{
182247835Skib	int put_count = 0;
183247835Skib
184247835Skib	if (!list_empty(&bo->swap)) {
185247835Skib		list_del_init(&bo->swap);
186247835Skib		++put_count;
187247835Skib	}
188247835Skib	if (!list_empty(&bo->lru)) {
189247835Skib		list_del_init(&bo->lru);
190247835Skib		++put_count;
191247835Skib	}
192247835Skib
193247835Skib	/*
194247835Skib	 * TODO: Add a driver hook to delete from
195247835Skib	 * driver-specific LRU's here.
196247835Skib	 */
197247835Skib
198247835Skib	return put_count;
199247835Skib}
200247835Skib
201254861Sdumbbellint ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
202247835Skib			  bool interruptible,
203247835Skib			  bool no_wait, bool use_sequence, uint32_t sequence)
204247835Skib{
205247835Skib	int ret;
206247835Skib
207254871Sdumbbell	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
208247835Skib		/**
209247835Skib		 * Deadlock avoidance for multi-bo reserving.
210247835Skib		 */
211247835Skib		if (use_sequence && bo->seq_valid) {
212247835Skib			/**
213247835Skib			 * We've already reserved this one.
214247835Skib			 */
215247835Skib			if (unlikely(sequence == bo->val_seq))
216247835Skib				return -EDEADLK;
217247835Skib			/**
218247835Skib			 * Already reserved by a thread that will not back
219247835Skib			 * off for us. We need to back off.
220247835Skib			 */
221247835Skib			if (unlikely(sequence - bo->val_seq < (1 << 31)))
222247835Skib				return -EAGAIN;
223247835Skib		}
224247835Skib
225247835Skib		if (no_wait)
226247835Skib			return -EBUSY;
227247835Skib
228247835Skib		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
229254861Sdumbbell
230247835Skib		if (unlikely(ret))
231247835Skib			return ret;
232247835Skib	}
233247835Skib
234247835Skib	if (use_sequence) {
235254871Sdumbbell		bool wake_up = false;
236247835Skib		/**
237247835Skib		 * Wake up waiters that may need to recheck for deadlock,
238247835Skib		 * if we decreased the sequence number.
239247835Skib		 */
240247835Skib		if (unlikely((bo->val_seq - sequence < (1 << 31))
241247835Skib			     || !bo->seq_valid))
242254871Sdumbbell			wake_up = true;
243247835Skib
244254871Sdumbbell		/*
245254871Sdumbbell		 * In the worst case with memory ordering these values can be
246254871Sdumbbell		 * seen in the wrong order. However since we call wake_up_all
247254871Sdumbbell		 * in that case, this will hopefully not pose a problem,
248254871Sdumbbell		 * and the worst case would only cause someone to accidentally
249254871Sdumbbell		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
250254871Sdumbbell		 * val_seq. However this would only happen if seq_valid was
251254871Sdumbbell		 * written before val_seq was, and just means some slightly
252254871Sdumbbell		 * increased cpu usage
253254871Sdumbbell		 */
254247835Skib		bo->val_seq = sequence;
255247835Skib		bo->seq_valid = true;
256254871Sdumbbell		if (wake_up)
257254861Sdumbbell			wakeup(bo);
258247835Skib	} else {
259247835Skib		bo->seq_valid = false;
260247835Skib	}
261247835Skib
262247835Skib	return 0;
263247835Skib}
264247835Skib
265247835Skibvoid ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
266247835Skib			 bool never_free)
267247835Skib{
268247835Skib	u_int old;
269247835Skib
270247835Skib	old = atomic_fetchadd_int(&bo->list_kref, -count);
271247835Skib	if (old <= count) {
272247835Skib		if (never_free)
273247835Skib			panic("ttm_bo_ref_buf");
274247835Skib		ttm_bo_release_list(bo);
275247835Skib	}
276247835Skib}
277247835Skib
278247835Skibint ttm_bo_reserve(struct ttm_buffer_object *bo,
279247835Skib		   bool interruptible,
280247835Skib		   bool no_wait, bool use_sequence, uint32_t sequence)
281247835Skib{
282247835Skib	struct ttm_bo_global *glob = bo->glob;
283247835Skib	int put_count = 0;
284247835Skib	int ret;
285247835Skib
286254878Sdumbbell	mtx_lock(&bo->glob->lru_lock);
287254861Sdumbbell	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
288254861Sdumbbell				   sequence);
289254861Sdumbbell	if (likely(ret == 0)) {
290247835Skib		put_count = ttm_bo_del_from_lru(bo);
291254861Sdumbbell		mtx_unlock(&glob->lru_lock);
292254861Sdumbbell		ttm_bo_list_ref_sub(bo, put_count, true);
293254878Sdumbbell	} else
294254878Sdumbbell		mtx_unlock(&bo->glob->lru_lock);
295247835Skib
296247835Skib	return ret;
297247835Skib}
298247835Skib
299254863Sdumbbellint ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
300254863Sdumbbell				  bool interruptible, uint32_t sequence)
301254863Sdumbbell{
302254863Sdumbbell	bool wake_up = false;
303254863Sdumbbell	int ret;
304254863Sdumbbell
305254863Sdumbbell	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
306254863Sdumbbell		if (bo->seq_valid && sequence == bo->val_seq) {
307254863Sdumbbell			DRM_ERROR(
308254863Sdumbbell			    "%s: bo->seq_valid && sequence == bo->val_seq",
309254863Sdumbbell			    __func__);
310254863Sdumbbell		}
311254863Sdumbbell
312254863Sdumbbell		ret = ttm_bo_wait_unreserved_locked(bo, interruptible);
313254863Sdumbbell
314254863Sdumbbell		if (unlikely(ret))
315254863Sdumbbell			return ret;
316254863Sdumbbell	}
317254863Sdumbbell
318254863Sdumbbell	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
319254863Sdumbbell		wake_up = true;
320254863Sdumbbell
321254863Sdumbbell	/**
322254863Sdumbbell	 * Wake up waiters that may need to recheck for deadlock,
323254863Sdumbbell	 * if we decreased the sequence number.
324254863Sdumbbell	 */
325254863Sdumbbell	bo->val_seq = sequence;
326254863Sdumbbell	bo->seq_valid = true;
327254863Sdumbbell	if (wake_up)
328254863Sdumbbell		wakeup(bo);
329254863Sdumbbell
330254863Sdumbbell	return 0;
331254863Sdumbbell}
332254863Sdumbbell
333254863Sdumbbellint ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
334254863Sdumbbell			    bool interruptible, uint32_t sequence)
335254863Sdumbbell{
336254863Sdumbbell	struct ttm_bo_global *glob = bo->glob;
337254863Sdumbbell	int put_count, ret;
338254863Sdumbbell
339254878Sdumbbell	mtx_lock(&glob->lru_lock);
340254863Sdumbbell	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
341254863Sdumbbell	if (likely(!ret)) {
342254863Sdumbbell		put_count = ttm_bo_del_from_lru(bo);
343254863Sdumbbell		mtx_unlock(&glob->lru_lock);
344254863Sdumbbell		ttm_bo_list_ref_sub(bo, put_count, true);
345254878Sdumbbell	} else
346254878Sdumbbell		mtx_unlock(&glob->lru_lock);
347254863Sdumbbell	return ret;
348254863Sdumbbell}
349254863Sdumbbell
350247835Skibvoid ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
351247835Skib{
352247835Skib	ttm_bo_add_to_lru(bo);
353247835Skib	atomic_set(&bo->reserved, 0);
354247835Skib	wakeup(bo);
355247835Skib}
356247835Skib
357247835Skibvoid ttm_bo_unreserve(struct ttm_buffer_object *bo)
358247835Skib{
359247835Skib	struct ttm_bo_global *glob = bo->glob;
360247835Skib
361247835Skib	mtx_lock(&glob->lru_lock);
362247835Skib	ttm_bo_unreserve_locked(bo);
363247835Skib	mtx_unlock(&glob->lru_lock);
364247835Skib}
365247835Skib
366247835Skib/*
367247835Skib * Call bo->mutex locked.
368247835Skib */
369247835Skibstatic int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
370247835Skib{
371247835Skib	struct ttm_bo_device *bdev = bo->bdev;
372247835Skib	struct ttm_bo_global *glob = bo->glob;
373247835Skib	int ret = 0;
374247835Skib	uint32_t page_flags = 0;
375247835Skib
376247835Skib	TTM_ASSERT_LOCKED(&bo->mutex);
377247835Skib	bo->ttm = NULL;
378247835Skib
379247835Skib	if (bdev->need_dma32)
380247835Skib		page_flags |= TTM_PAGE_FLAG_DMA32;
381247835Skib
382247835Skib	switch (bo->type) {
383247835Skib	case ttm_bo_type_device:
384247835Skib		if (zero_alloc)
385247835Skib			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
386247835Skib	case ttm_bo_type_kernel:
387247835Skib		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
388247835Skib						      page_flags, glob->dummy_read_page);
389247835Skib		if (unlikely(bo->ttm == NULL))
390247835Skib			ret = -ENOMEM;
391247835Skib		break;
392247835Skib	case ttm_bo_type_sg:
393247835Skib		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
394247835Skib						      page_flags | TTM_PAGE_FLAG_SG,
395247835Skib						      glob->dummy_read_page);
396247835Skib		if (unlikely(bo->ttm == NULL)) {
397247835Skib			ret = -ENOMEM;
398247835Skib			break;
399247835Skib		}
400247835Skib		bo->ttm->sg = bo->sg;
401247835Skib		break;
402247835Skib	default:
403247835Skib		printf("[TTM] Illegal buffer object type\n");
404247835Skib		ret = -EINVAL;
405247835Skib		break;
406247835Skib	}
407247835Skib
408247835Skib	return ret;
409247835Skib}
410247835Skib
411247835Skibstatic int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
412247835Skib				  struct ttm_mem_reg *mem,
413247835Skib				  bool evict, bool interruptible,
414247835Skib				  bool no_wait_gpu)
415247835Skib{
416247835Skib	struct ttm_bo_device *bdev = bo->bdev;
417247835Skib	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
418247835Skib	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
419247835Skib	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
420247835Skib	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
421247835Skib	int ret = 0;
422247835Skib
423247835Skib	if (old_is_pci || new_is_pci ||
424247835Skib	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
425247835Skib		ret = ttm_mem_io_lock(old_man, true);
426247835Skib		if (unlikely(ret != 0))
427247835Skib			goto out_err;
428247835Skib		ttm_bo_unmap_virtual_locked(bo);
429247835Skib		ttm_mem_io_unlock(old_man);
430247835Skib	}
431247835Skib
432247835Skib	/*
433247835Skib	 * Create and bind a ttm if required.
434247835Skib	 */
435247835Skib
436247835Skib	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
437247835Skib		if (bo->ttm == NULL) {
438247835Skib			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
439247835Skib			ret = ttm_bo_add_ttm(bo, zero);
440247835Skib			if (ret)
441247835Skib				goto out_err;
442247835Skib		}
443247835Skib
444247835Skib		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
445247835Skib		if (ret)
446247835Skib			goto out_err;
447247835Skib
448247835Skib		if (mem->mem_type != TTM_PL_SYSTEM) {
449247835Skib			ret = ttm_tt_bind(bo->ttm, mem);
450247835Skib			if (ret)
451247835Skib				goto out_err;
452247835Skib		}
453247835Skib
454247835Skib		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
455247835Skib			if (bdev->driver->move_notify)
456247835Skib				bdev->driver->move_notify(bo, mem);
457247835Skib			bo->mem = *mem;
458247835Skib			mem->mm_node = NULL;
459247835Skib			goto moved;
460247835Skib		}
461247835Skib	}
462247835Skib
463247835Skib	if (bdev->driver->move_notify)
464247835Skib		bdev->driver->move_notify(bo, mem);
465247835Skib
466247835Skib	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
467247835Skib	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
468247835Skib		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
469247835Skib	else if (bdev->driver->move)
470247835Skib		ret = bdev->driver->move(bo, evict, interruptible,
471247835Skib					 no_wait_gpu, mem);
472247835Skib	else
473247835Skib		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
474247835Skib
475247835Skib	if (ret) {
476247835Skib		if (bdev->driver->move_notify) {
477247835Skib			struct ttm_mem_reg tmp_mem = *mem;
478247835Skib			*mem = bo->mem;
479247835Skib			bo->mem = tmp_mem;
480247835Skib			bdev->driver->move_notify(bo, mem);
481247835Skib			bo->mem = *mem;
482254867Sdumbbell			*mem = tmp_mem;
483247835Skib		}
484247835Skib
485247835Skib		goto out_err;
486247835Skib	}
487247835Skib
488247835Skibmoved:
489247835Skib	if (bo->evicted) {
490247835Skib		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
491247835Skib		if (ret)
492247835Skib			printf("[TTM] Can not flush read caches\n");
493247835Skib		bo->evicted = false;
494247835Skib	}
495247835Skib
496247835Skib	if (bo->mem.mm_node) {
497247835Skib		bo->offset = (bo->mem.start << PAGE_SHIFT) +
498247835Skib		    bdev->man[bo->mem.mem_type].gpu_offset;
499247835Skib		bo->cur_placement = bo->mem.placement;
500247835Skib	} else
501247835Skib		bo->offset = 0;
502247835Skib
503247835Skib	return 0;
504247835Skib
505247835Skibout_err:
506247835Skib	new_man = &bdev->man[bo->mem.mem_type];
507247835Skib	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
508247835Skib		ttm_tt_unbind(bo->ttm);
509247835Skib		ttm_tt_destroy(bo->ttm);
510247835Skib		bo->ttm = NULL;
511247835Skib	}
512247835Skib
513247835Skib	return ret;
514247835Skib}
515247835Skib
516247835Skib/**
517247835Skib * Call bo::reserved.
518247835Skib * Will release GPU memory type usage on destruction.
519247835Skib * This is the place to put in driver specific hooks to release
520247835Skib * driver private resources.
521247835Skib * Will release the bo::reserved lock.
522247835Skib */
523247835Skib
524247835Skibstatic void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
525247835Skib{
526247835Skib	if (bo->bdev->driver->move_notify)
527247835Skib		bo->bdev->driver->move_notify(bo, NULL);
528247835Skib
529247835Skib	if (bo->ttm) {
530247835Skib		ttm_tt_unbind(bo->ttm);
531247835Skib		ttm_tt_destroy(bo->ttm);
532247835Skib		bo->ttm = NULL;
533247835Skib	}
534247835Skib	ttm_bo_mem_put(bo, &bo->mem);
535247835Skib
536247835Skib	atomic_set(&bo->reserved, 0);
537247835Skib	wakeup(&bo);
538247835Skib
539247835Skib	/*
540247835Skib	 * Since the final reference to this bo may not be dropped by
541247835Skib	 * the current task we have to put a memory barrier here to make
542247835Skib	 * sure the changes done in this function are always visible.
543247835Skib	 *
544247835Skib	 * This function only needs protection against the final kref_put.
545247835Skib	 */
546247835Skib	mb();
547247835Skib}
548247835Skib
549247835Skibstatic void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
550247835Skib{
551247835Skib	struct ttm_bo_device *bdev = bo->bdev;
552247835Skib	struct ttm_bo_global *glob = bo->glob;
553247835Skib	struct ttm_bo_driver *driver = bdev->driver;
554247835Skib	void *sync_obj = NULL;
555247835Skib	int put_count;
556247835Skib	int ret;
557247835Skib
558247835Skib	mtx_lock(&glob->lru_lock);
559254861Sdumbbell	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
560247835Skib
561247835Skib	mtx_lock(&bdev->fence_lock);
562247835Skib	(void) ttm_bo_wait(bo, false, false, true);
563247835Skib	if (!ret && !bo->sync_obj) {
564247835Skib		mtx_unlock(&bdev->fence_lock);
565247835Skib		put_count = ttm_bo_del_from_lru(bo);
566247835Skib
567247835Skib		mtx_unlock(&glob->lru_lock);
568247835Skib		ttm_bo_cleanup_memtype_use(bo);
569247835Skib
570247835Skib		ttm_bo_list_ref_sub(bo, put_count, true);
571247835Skib
572247835Skib		return;
573247835Skib	}
574247835Skib	if (bo->sync_obj)
575247835Skib		sync_obj = driver->sync_obj_ref(bo->sync_obj);
576247835Skib	mtx_unlock(&bdev->fence_lock);
577247835Skib
578247835Skib	if (!ret) {
579247835Skib		atomic_set(&bo->reserved, 0);
580247835Skib		wakeup(bo);
581247835Skib	}
582247835Skib
583247835Skib	refcount_acquire(&bo->list_kref);
584247835Skib	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
585247835Skib	mtx_unlock(&glob->lru_lock);
586247835Skib
587247835Skib	if (sync_obj) {
588247835Skib		driver->sync_obj_flush(sync_obj);
589247835Skib		driver->sync_obj_unref(&sync_obj);
590247835Skib	}
591247835Skib	taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
592247835Skib	    ((hz / 100) < 1) ? 1 : hz / 100);
593247835Skib}
594247835Skib
595247835Skib/**
596247835Skib * function ttm_bo_cleanup_refs_and_unlock
597247835Skib * If bo idle, remove from delayed- and lru lists, and unref.
598247835Skib * If not idle, do nothing.
599247835Skib *
600247835Skib * Must be called with lru_lock and reservation held, this function
601247835Skib * will drop both before returning.
602247835Skib *
603247835Skib * @interruptible         Any sleeps should occur interruptibly.
604247835Skib * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
605247835Skib */
606247835Skib
607247835Skibstatic int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
608247835Skib					  bool interruptible,
609247835Skib					  bool no_wait_gpu)
610247835Skib{
611247835Skib	struct ttm_bo_device *bdev = bo->bdev;
612247835Skib	struct ttm_bo_driver *driver = bdev->driver;
613247835Skib	struct ttm_bo_global *glob = bo->glob;
614247835Skib	int put_count;
615247835Skib	int ret;
616247835Skib
617247835Skib	mtx_lock(&bdev->fence_lock);
618247835Skib	ret = ttm_bo_wait(bo, false, false, true);
619247835Skib
620247835Skib	if (ret && !no_wait_gpu) {
621247835Skib		void *sync_obj;
622247835Skib
623247835Skib		/*
624247835Skib		 * Take a reference to the fence and unreserve,
625247835Skib		 * at this point the buffer should be dead, so
626247835Skib		 * no new sync objects can be attached.
627247835Skib		 */
628247835Skib		sync_obj = driver->sync_obj_ref(bo->sync_obj);
629247835Skib		mtx_unlock(&bdev->fence_lock);
630247835Skib
631247835Skib		atomic_set(&bo->reserved, 0);
632247835Skib		wakeup(bo);
633247835Skib		mtx_unlock(&glob->lru_lock);
634247835Skib
635247835Skib		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
636247835Skib		driver->sync_obj_unref(&sync_obj);
637247835Skib		if (ret)
638247835Skib			return ret;
639247835Skib
640247835Skib		/*
641247835Skib		 * remove sync_obj with ttm_bo_wait, the wait should be
642247835Skib		 * finished, and no new wait object should have been added.
643247835Skib		 */
644247835Skib		mtx_lock(&bdev->fence_lock);
645247835Skib		ret = ttm_bo_wait(bo, false, false, true);
646247835Skib		mtx_unlock(&bdev->fence_lock);
647247835Skib		if (ret)
648247835Skib			return ret;
649247835Skib
650247835Skib		mtx_lock(&glob->lru_lock);
651254861Sdumbbell		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
652247835Skib
653247835Skib		/*
654247835Skib		 * We raced, and lost, someone else holds the reservation now,
655247835Skib		 * and is probably busy in ttm_bo_cleanup_memtype_use.
656247835Skib		 *
657247835Skib		 * Even if it's not the case, because we finished waiting any
658247835Skib		 * delayed destruction would succeed, so just return success
659247835Skib		 * here.
660247835Skib		 */
661247835Skib		if (ret) {
662247835Skib			mtx_unlock(&glob->lru_lock);
663247835Skib			return 0;
664247835Skib		}
665247835Skib	} else
666247835Skib		mtx_unlock(&bdev->fence_lock);
667247835Skib
668247835Skib	if (ret || unlikely(list_empty(&bo->ddestroy))) {
669247835Skib		atomic_set(&bo->reserved, 0);
670247835Skib		wakeup(bo);
671247835Skib		mtx_unlock(&glob->lru_lock);
672247835Skib		return ret;
673247835Skib	}
674247835Skib
675247835Skib	put_count = ttm_bo_del_from_lru(bo);
676247835Skib	list_del_init(&bo->ddestroy);
677247835Skib	++put_count;
678247835Skib
679247835Skib	mtx_unlock(&glob->lru_lock);
680247835Skib	ttm_bo_cleanup_memtype_use(bo);
681247835Skib
682247835Skib	ttm_bo_list_ref_sub(bo, put_count, true);
683247835Skib
684247835Skib	return 0;
685247835Skib}
686247835Skib
687247835Skib/**
688247835Skib * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
689247835Skib * encountered buffers.
690247835Skib */
691247835Skib
692247835Skibstatic int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
693247835Skib{
694247835Skib	struct ttm_bo_global *glob = bdev->glob;
695247835Skib	struct ttm_buffer_object *entry = NULL;
696247835Skib	int ret = 0;
697247835Skib
698247835Skib	mtx_lock(&glob->lru_lock);
699247835Skib	if (list_empty(&bdev->ddestroy))
700247835Skib		goto out_unlock;
701247835Skib
702247835Skib	entry = list_first_entry(&bdev->ddestroy,
703247835Skib		struct ttm_buffer_object, ddestroy);
704247835Skib	refcount_acquire(&entry->list_kref);
705247835Skib
706247835Skib	for (;;) {
707247835Skib		struct ttm_buffer_object *nentry = NULL;
708247835Skib
709247835Skib		if (entry->ddestroy.next != &bdev->ddestroy) {
710247835Skib			nentry = list_first_entry(&entry->ddestroy,
711247835Skib				struct ttm_buffer_object, ddestroy);
712247835Skib			refcount_acquire(&nentry->list_kref);
713247835Skib		}
714247835Skib
715254861Sdumbbell		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
716254861Sdumbbell		if (remove_all && ret) {
717254861Sdumbbell			ret = ttm_bo_reserve_nolru(entry, false, false,
718254861Sdumbbell						   false, 0);
719254861Sdumbbell		}
720254861Sdumbbell
721247835Skib		if (!ret)
722247835Skib			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
723247835Skib							     !remove_all);
724247835Skib		else
725247835Skib			mtx_unlock(&glob->lru_lock);
726247835Skib
727247835Skib		if (refcount_release(&entry->list_kref))
728247835Skib			ttm_bo_release_list(entry);
729247835Skib		entry = nentry;
730247835Skib
731247835Skib		if (ret || !entry)
732247835Skib			goto out;
733247835Skib
734247835Skib		mtx_lock(&glob->lru_lock);
735247835Skib		if (list_empty(&entry->ddestroy))
736247835Skib			break;
737247835Skib	}
738247835Skib
739247835Skibout_unlock:
740247835Skib	mtx_unlock(&glob->lru_lock);
741247835Skibout:
742247835Skib	if (entry && refcount_release(&entry->list_kref))
743247835Skib		ttm_bo_release_list(entry);
744247835Skib	return ret;
745247835Skib}
746247835Skib
747247835Skibstatic void ttm_bo_delayed_workqueue(void *arg, int pending __unused)
748247835Skib{
749247835Skib	struct ttm_bo_device *bdev = arg;
750247835Skib
751247835Skib	if (ttm_bo_delayed_delete(bdev, false)) {
752247835Skib		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
753247835Skib		    ((hz / 100) < 1) ? 1 : hz / 100);
754247835Skib	}
755247835Skib}
756247835Skib
757247835Skibstatic void ttm_bo_release(struct ttm_buffer_object *bo)
758247835Skib{
759247835Skib	struct ttm_bo_device *bdev = bo->bdev;
760247835Skib	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
761247835Skib
762247835Skib	rw_wlock(&bdev->vm_lock);
763247835Skib	if (likely(bo->vm_node != NULL)) {
764247835Skib		RB_REMOVE(ttm_bo_device_buffer_objects,
765247835Skib		    &bdev->addr_space_rb, bo);
766247835Skib		drm_mm_put_block(bo->vm_node);
767247835Skib		bo->vm_node = NULL;
768247835Skib	}
769247835Skib	rw_wunlock(&bdev->vm_lock);
770247835Skib	ttm_mem_io_lock(man, false);
771247835Skib	ttm_mem_io_free_vm(bo);
772247835Skib	ttm_mem_io_unlock(man);
773247835Skib	ttm_bo_cleanup_refs_or_queue(bo);
774247835Skib	if (refcount_release(&bo->list_kref))
775247835Skib		ttm_bo_release_list(bo);
776247835Skib}
777247835Skib
778247835Skibvoid ttm_bo_unref(struct ttm_buffer_object **p_bo)
779247835Skib{
780247835Skib	struct ttm_buffer_object *bo = *p_bo;
781247835Skib
782247835Skib	*p_bo = NULL;
783247835Skib	if (refcount_release(&bo->kref))
784247835Skib		ttm_bo_release(bo);
785247835Skib}
786247835Skib
787247835Skibint ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
788247835Skib{
789247835Skib	int pending;
790247835Skib
791247835Skib	taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending);
792247835Skib	if (pending)
793247835Skib		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
794247835Skib	return (pending);
795247835Skib}
796247835Skib
797247835Skibvoid ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
798247835Skib{
799247835Skib	if (resched) {
800247835Skib		taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
801247835Skib		    ((hz / 100) < 1) ? 1 : hz / 100);
802247835Skib	}
803247835Skib}
804247835Skib
/*
 * Move a reserved buffer out of its current memory type so that space can
 * be reclaimed.  The driver's evict_flags() callback fills in the allowed
 * eviction placements.  Returns 0 on success, -ERESTART when a wait was
 * interrupted, or another negative errno on failure.
 */
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	/* Wait for GPU activity on the buffer; fence_lock protects sync_obj. */
	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	mtx_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		/* -ERESTART is an ordinary interruption, not worth logging. */
		if (ret != -ERESTART) {
			printf("[TTM] Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	MPASS(ttm_bo_is_reserved(bo));

	/* Start from the current region description, minus the backing node. */
	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	/* Zero defaults; the driver callback below chooses the placements. */
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTART) {
			printf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTART)
			printf("[TTM] Buffer eviction failed\n");
		/* Return the node acquired by ttm_bo_mem_space() above. */
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
859247835Skib
/*
 * Evict the first buffer on @mem_type's LRU list that can be reserved
 * without blocking.  Buffers already on the delayed-destroy list are
 * finished off instead of being evicted.  Returns 0 on success or a
 * negative errno when no buffer could be reserved or eviction failed.
 */
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	mtx_lock(&glob->lru_lock);
	/* Find the first (least recently used) buffer we can reserve now. */
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		mtx_unlock(&glob->lru_lock);
		return ret;
	}

	/* Hold a list reference so the bo outlives the unlocked section. */
	refcount_acquire(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		/* Already queued for destruction: complete that instead.
		 * The helper drops lru_lock itself. */
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		if (refcount_release(&bo->list_kref))
			ttm_bo_release_list(bo);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	mtx_unlock(&glob->lru_lock);

	MPASS(ret == 0);

	/* Drop the references the LRU lists were holding. */
	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	if (refcount_release(&bo->list_kref))
		ttm_bo_release_list(bo);
	return ret;
}
906247835Skib
907247835Skibvoid ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
908247835Skib{
909247835Skib	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
910247835Skib
911247835Skib	if (mem->mm_node)
912247835Skib		(*man->func->put_node)(man, mem);
913247835Skib}
914247835Skib
915247835Skib/**
916247835Skib * Repeatedly evict memory from the LRU for @mem_type until we create enough
917247835Skib * space, or we've evicted everything and there isn't enough space.
918247835Skib */
919247835Skibstatic int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
920247835Skib					uint32_t mem_type,
921247835Skib					struct ttm_placement *placement,
922247835Skib					struct ttm_mem_reg *mem,
923247835Skib					bool interruptible,
924247835Skib					bool no_wait_gpu)
925247835Skib{
926247835Skib	struct ttm_bo_device *bdev = bo->bdev;
927247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
928247835Skib	int ret;
929247835Skib
930247835Skib	do {
931247835Skib		ret = (*man->func->get_node)(man, bo, placement, mem);
932247835Skib		if (unlikely(ret != 0))
933247835Skib			return ret;
934247835Skib		if (mem->mm_node)
935247835Skib			break;
936247835Skib		ret = ttm_mem_evict_first(bdev, mem_type,
937247835Skib					  interruptible, no_wait_gpu);
938247835Skib		if (unlikely(ret != 0))
939247835Skib			return ret;
940247835Skib	} while (1);
941247835Skib	if (mem->mm_node == NULL)
942247835Skib		return -ENOMEM;
943247835Skib	mem->mem_type = mem_type;
944247835Skib	return 0;
945247835Skib}
946247835Skib
947247835Skibstatic uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
948247835Skib				      uint32_t cur_placement,
949247835Skib				      uint32_t proposed_placement)
950247835Skib{
951247835Skib	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
952247835Skib	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
953247835Skib
954247835Skib	/**
955247835Skib	 * Keep current caching if possible.
956247835Skib	 */
957247835Skib
958247835Skib	if ((cur_placement & caching) != 0)
959247835Skib		result |= (cur_placement & caching);
960247835Skib	else if ((man->default_caching & caching) != 0)
961247835Skib		result |= man->default_caching;
962247835Skib	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
963247835Skib		result |= TTM_PL_FLAG_CACHED;
964247835Skib	else if ((TTM_PL_FLAG_WC & caching) != 0)
965247835Skib		result |= TTM_PL_FLAG_WC;
966247835Skib	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
967247835Skib		result |= TTM_PL_FLAG_UNCACHED;
968247835Skib
969247835Skib	return result;
970247835Skib}
971247835Skib
972247835Skibstatic bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
973247835Skib				 uint32_t mem_type,
974247835Skib				 uint32_t proposed_placement,
975247835Skib				 uint32_t *masked_placement)
976247835Skib{
977247835Skib	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
978247835Skib
979247835Skib	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
980247835Skib		return false;
981247835Skib
982247835Skib	if ((proposed_placement & man->available_caching) == 0)
983247835Skib		return false;
984247835Skib
985247835Skib	cur_flags |= (proposed_placement & man->available_caching);
986247835Skib
987247835Skib	*masked_placement = cur_flags;
988247835Skib	return true;
989247835Skib}
990247835Skib
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 *
 * Returns 0 with @mem filled in on success, -EINVAL if no requested
 * memory type is enabled, -ERESTART if an eviction wait was interrupted,
 * or -ENOMEM when no space could be made.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	/* First pass: preferred placements, without evicting anything. */
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		/* System memory needs no manager node; we are done. */
		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		/* get_node() may find no free space (mm_node left NULL). */
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	/* No enabled manager matched any of the requested placements. */
	if (!type_found)
		return -EINVAL;

	/* Second pass: busy placements, evicting other buffers as needed. */
	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);


		/* System memory is always available; no node needed. */
		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		/* Remember interruptions so they take precedence over ENOMEM. */
		if (ret == -ERESTART)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
	return ret;
}
1104247835Skib
/*
 * Move a reserved buffer so that it satisfies @placement: wait for the
 * buffer to go idle, find space via ttm_bo_mem_space(), then perform the
 * move.  On failure any newly acquired manager node is released.
 */
static
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	MPASS(ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	mtx_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	mtx_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	/* Describe the region to allocate; mm_node is set by mem_space. */
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	/* On error, give back the node ttm_bo_mem_space() handed us. */
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}
1146247835Skib
1147247835Skibstatic int ttm_bo_mem_compat(struct ttm_placement *placement,
1148247835Skib			     struct ttm_mem_reg *mem)
1149247835Skib{
1150247835Skib	int i;
1151247835Skib
1152247835Skib	if (mem->mm_node && placement->lpfn != 0 &&
1153247835Skib	    (mem->start < placement->fpfn ||
1154247835Skib	     mem->start + mem->num_pages > placement->lpfn))
1155247835Skib		return -1;
1156247835Skib
1157247835Skib	for (i = 0; i < placement->num_placement; i++) {
1158247835Skib		if ((placement->placement[i] & mem->placement &
1159247835Skib			TTM_PL_MASK_CACHING) &&
1160247835Skib			(placement->placement[i] & mem->placement &
1161247835Skib			TTM_PL_MASK_MEM))
1162247835Skib			return i;
1163247835Skib	}
1164247835Skib	return -1;
1165247835Skib}
1166247835Skib
/*
 * Validate a reserved buffer against @placement: move the buffer if its
 * current placement is incompatible, otherwise just refresh its placement
 * flags, and attach a TTM when the buffer ends up in system memory.
 * Returns 0 on success or a negative errno.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;

	MPASS(ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 * ttm_bo_mem_compat() returns the matching placement index, or -1.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
1207247835Skib
1208247835Skibint ttm_bo_check_placement(struct ttm_buffer_object *bo,
1209247835Skib				struct ttm_placement *placement)
1210247835Skib{
1211247835Skib	MPASS(!((placement->fpfn || placement->lpfn) &&
1212247835Skib	    (bo->mem.num_pages > (placement->lpfn - placement->fpfn))));
1213247835Skib
1214247835Skib	return 0;
1215247835Skib}
1216247835Skib
/*
 * Initialize a caller-allocated buffer object and validate it into
 * @placement.
 *
 * @bdev: device the buffer belongs to.
 * @bo: storage for the buffer object being set up.
 * @size: buffer size in bytes, rounded up to whole pages.
 * @type: buffer object type (device / kernel / sg).
 * @placement: initial placement to validate into.
 * @page_alignment: required alignment, in pages.
 * @interruptible: whether waits during validation may be interrupted.
 * @persistent_swap_storage: optional VM object used as swap backing.
 * @acc_size: accounted size charged against the global memory limit
 *            (see ttm_bo_acc_size()).
 * @sg: optional scatter/gather table for ttm_bo_type_sg buffers.
 * @destroy: destructor callback; when NULL the bo is freed from M_TTM_BO.
 *
 * On success the buffer is returned unreserved.  On failure the buffer
 * is destroyed/freed and a negative errno is returned.
 */
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct vm_object *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	/* Charge the accounted size before committing to anything else. */
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		printf("[TTM] Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			free(bo, M_TTM_BO);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printf("[TTM] Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			free(bo, M_TTM_BO);
		/* Undo the accounting done above. */
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	refcount_init(&bo->kref, 1);
	refcount_init(&bo->list_kref, 1);
	atomic_set(&bo->cpu_writers, 0);
	/* The new buffer starts out reserved; unreserved on success below. */
	atomic_set(&bo->reserved, 1);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	/* Every buffer starts in system memory, cached. */
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	/* Drop the initial reference; this tears the buffer down. */
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
1310247835Skib
1311247835Skibsize_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1312247835Skib		       unsigned long bo_size,
1313247835Skib		       unsigned struct_size)
1314247835Skib{
1315247835Skib	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1316247835Skib	size_t size = 0;
1317247835Skib
1318247835Skib	size += ttm_round_pot(struct_size);
1319247835Skib	size += PAGE_ALIGN(npages * sizeof(void *));
1320247835Skib	size += ttm_round_pot(sizeof(struct ttm_tt));
1321247835Skib	return size;
1322247835Skib}
1323247835Skib
1324247835Skibsize_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1325247835Skib			   unsigned long bo_size,
1326247835Skib			   unsigned struct_size)
1327247835Skib{
1328247835Skib	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1329247835Skib	size_t size = 0;
1330247835Skib
1331247835Skib	size += ttm_round_pot(struct_size);
1332247835Skib	size += PAGE_ALIGN(npages * sizeof(void *));
1333247835Skib	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1334247835Skib	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1335247835Skib	return size;
1336247835Skib}
1337247835Skib
1338247835Skibint ttm_bo_create(struct ttm_bo_device *bdev,
1339247835Skib			unsigned long size,
1340247835Skib			enum ttm_bo_type type,
1341247835Skib			struct ttm_placement *placement,
1342247835Skib			uint32_t page_alignment,
1343247835Skib			bool interruptible,
1344247835Skib			struct vm_object *persistent_swap_storage,
1345247835Skib			struct ttm_buffer_object **p_bo)
1346247835Skib{
1347247835Skib	struct ttm_buffer_object *bo;
1348247835Skib	size_t acc_size;
1349247835Skib	int ret;
1350247835Skib
1351247835Skib	bo = malloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
1352247835Skib	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1353247835Skib	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1354247835Skib			  interruptible, persistent_swap_storage, acc_size,
1355247835Skib			  NULL, NULL);
1356247835Skib	if (likely(ret == 0))
1357247835Skib		*p_bo = bo;
1358247835Skib
1359247835Skib	return ret;
1360247835Skib}
1361247835Skib
/*
 * Evict every buffer on @mem_type's LRU list.  With @allow_errors set,
 * the first eviction failure is returned to the caller; otherwise
 * failures are only logged and the loop keeps going.
 */
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	mtx_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		/* ttm_mem_evict_first() takes lru_lock itself. */
		mtx_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				printf("[TTM] Cleanup eviction failed\n");
			}
		}
		/* Re-take the lock before testing the list again. */
		mtx_lock(&glob->lru_lock);
	}
	mtx_unlock(&glob->lru_lock);
	return 0;
}
1389247835Skib
/*
 * Take down memory manager @mem_type on @bdev: disable the type, evict
 * all of its buffers and call the manager's takedown hook.  The system
 * type (0) is only flagged down, never evicted or torn down.
 * Returns 0 on success or a negative errno.
 */
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printf("[TTM] Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		printf("[TTM] Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	/* Disable the type first so no new allocations land here. */
	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		/* Best-effort cleanup: errors are logged, not returned. */
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
1419247835Skib
1420247835Skibint ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1421247835Skib{
1422247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1423247835Skib
1424247835Skib	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1425247835Skib		printf("[TTM] Illegal memory manager memory type %u\n", mem_type);
1426247835Skib		return -EINVAL;
1427247835Skib	}
1428247835Skib
1429247835Skib	if (!man->has_type) {
1430247835Skib		printf("[TTM] Memory type %u has not been initialized\n", mem_type);
1431247835Skib		return 0;
1432247835Skib	}
1433247835Skib
1434247835Skib	return ttm_bo_force_list_clean(bdev, mem_type, true);
1435247835Skib}
1436247835Skib
/*
 * Initialize memory manager @type on @bdev with size @p_size (units are
 * defined by the manager backend; unused for TTM_PL_SYSTEM).  The
 * driver's init_mem_type() hook configures the manager before the type
 * is enabled.  Returns 0 on success or a negative errno.
 */
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	MPASS(type < TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	/* Double initialization is a programming error. */
	MPASS(!man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	sx_init(&man->io_reserve_mutex, "ttmman");
	INIT_LIST_HEAD(&man->io_reserve_lru);

	/* Let the driver fill in func, flags, caching etc. */
	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	/* The system type has no backing manager to initialize. */
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
1470247835Skib
/*
 * Final teardown of the bo global state, run when the last kobj
 * reference is dropped (see ttm_bo_global_release()): unregister the
 * swapout shrink hook and free the dummy read page.
 */
static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
{

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	vm_page_free(glob->dummy_read_page);
}
1477247835Skib
1478247835Skibvoid ttm_bo_global_release(struct drm_global_reference *ref)
1479247835Skib{
1480247835Skib	struct ttm_bo_global *glob = ref->object;
1481247835Skib
1482247835Skib	if (refcount_release(&glob->kobj_ref))
1483247835Skib		ttm_bo_global_kobj_release(glob);
1484247835Skib}
1485247835Skib
/*
 * One-time initialization of the buffer-object global state shared by
 * all TTM devices: locks and lists, the dummy read page and the swapout
 * shrink callback.  Invoked through the drm_global reference machinery;
 * on failure @glob is freed and a negative errno returned.
 */
int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	sx_init(&glob->device_list_mutex, "ttmdlm");
	mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
	glob->mem_glob = bo_ref->mem_glob;
	/* Single uncacheable page shared by all devices for dummy reads. */
	glob->dummy_read_page = vm_page_alloc_contig(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ,
	    1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	/* Register ttm_bo_swapout() so memory pressure can reclaim bos. */
	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printf("[TTM] Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	refcount_init(&glob->kobj_ref, 1);
	return (0);

out_no_shrink:
	vm_page_free(glob->dummy_read_page);
out_no_drp:
	free(glob, M_DRM_GLOBAL);
	return ret;
}
1526247835Skib
/*
 * Tear down a ttm_bo_device: shut down all memory managers, remove the
 * device from the global list, stop the delayed-delete task and flush
 * the delayed-destroy list.  Returns 0, or -EBUSY when a manager could
 * not be cleaned.
 */
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	/* Walk the managers from highest type down to 0. */
	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printf("[TTM] DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	sx_xlock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	sx_xunlock(&glob->device_list_mutex);

	/* Stop the delayed-delete task and wait for a running instance. */
	if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
		taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);

	/* Flush any remaining delayed destroys. */
	while (ttm_bo_delayed_delete(bdev, true))
		;

	mtx_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	mtx_unlock(&glob->lru_lock);

	/* All address-space allocations must be gone by now. */
	MPASS(drm_mm_clean(&bdev->addr_space_mm));
	rw_wlock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	rw_wunlock(&bdev->vm_lock);

	return ret;
}
1572247835Skib
/*
 * Initialize a ttm_bo_device: the system memory manager, the mmap
 * address-space allocator (starting at @file_page_offset), the
 * delayed-delete task, and registration on the global device list.
 * Returns 0 on success or a negative errno.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rw_init(&bdev->vm_lock, "ttmvml");
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	RB_INIT(&bdev->addr_space_rb);
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	/* Task that processes the delayed-destroy list in the background. */
	TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
	    ttm_bo_delayed_workqueue, bdev);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
	sx_xlock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	sx_xunlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	/* Undo the system-manager initialization done above. */
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
1617247835Skib
1618247835Skib/*
1619247835Skib * buffer object vm functions.
1620247835Skib */
1621247835Skib
1622247835Skibbool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1623247835Skib{
1624247835Skib	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1625247835Skib
1626247835Skib	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1627247835Skib		if (mem->mem_type == TTM_PL_SYSTEM)
1628247835Skib			return false;
1629247835Skib
1630247835Skib		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1631247835Skib			return false;
1632247835Skib
1633247835Skib		if (mem->placement & TTM_PL_FLAG_CACHED)
1634247835Skib			return false;
1635247835Skib	}
1636247835Skib	return true;
1637247835Skib}
1638247835Skib
/*
 * Remove userspace CPU mappings of @bo and free its io reservation.
 * Caller holds the manager's io reserve lock (see ttm_bo_unmap_virtual()).
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{

	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
}
1645247835Skib
/*
 * Locked wrapper around ttm_bo_unmap_virtual_locked(): takes the io
 * reserve lock of the buffer's current memory-type manager.
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
1655247835Skib
/*
 * Insert @bo into the device's address-space red-black tree so it can be
 * looked up by mmap offset.
 */
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	/* The caller acquired bdev->vm_lock. */
	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
}
1663247835Skib
/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 *
 * Returns 0 on success or a negative errno.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	/* Pre-allocate mm nodes so the atomic get below cannot run out. */
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	rw_wlock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		/* Pre-allocated nodes were exhausted concurrently; retry. */
		rw_wunlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	rw_wunlock(&bdev->vm_lock);
	/* The mmap offset handed to userspace, in bytes. */
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	rw_wunlock(&bdev->vm_lock);
	return ret;
}
1711247835Skib
/*
 * ttm_bo_wait - wait for the buffer object's pending sync object (fence)
 * to signal.
 *
 * @bo:            buffer object to wait on.
 * @lazy:          passed through to the driver's sync_obj_wait callback.
 * @interruptible: passed through to the driver's sync_obj_wait callback.
 * @no_wait:       if true, return -EBUSY instead of sleeping when the
 *                 sync object is not yet signaled.
 *
 * Must be called with bo->bdev->fence_lock held; the lock is dropped and
 * re-acquired around driver callbacks that may sleep, and is held again
 * on return.  Returns 0 once bo->sync_obj is NULL, -EBUSY in the no_wait
 * case, or the error returned by the driver's wait callback.
 */
1712247835Skibint ttm_bo_wait(struct ttm_buffer_object *bo,
1713247835Skib		bool lazy, bool interruptible, bool no_wait)
1714247835Skib{
1715247835Skib	struct ttm_bo_driver *driver = bo->bdev->driver;
1716247835Skib	struct ttm_bo_device *bdev = bo->bdev;
1717247835Skib	void *sync_obj;
1718247835Skib	int ret = 0;
1719247835Skib
1720247835Skib	if (likely(bo->sync_obj == NULL))
1721247835Skib		return 0;
1722247835Skib
1723247835Skib	while (bo->sync_obj) {
1724247835Skib
	/*
	 * Already signaled: detach the sync object and drop its
	 * reference outside the fence_lock (unref may sleep).
	 */
1725247835Skib		if (driver->sync_obj_signaled(bo->sync_obj)) {
1726247835Skib			void *tmp_obj = bo->sync_obj;
1727247835Skib			bo->sync_obj = NULL;
1728255044Sjkim			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1729247835Skib			mtx_unlock(&bdev->fence_lock);
1730247835Skib			driver->sync_obj_unref(&tmp_obj);
1731247835Skib			mtx_lock(&bdev->fence_lock);
1732247835Skib			continue;
1733247835Skib		}
1734247835Skib
1735247835Skib		if (no_wait)
1736247835Skib			return -EBUSY;
1737247835Skib
	/*
	 * Take a private reference so the sync object stays valid while
	 * the fence_lock is dropped for the (possibly sleeping) wait.
	 */
1738247835Skib		sync_obj = driver->sync_obj_ref(bo->sync_obj);
1739247835Skib		mtx_unlock(&bdev->fence_lock);
1740247835Skib		ret = driver->sync_obj_wait(sync_obj,
1741247835Skib					    lazy, interruptible);
1742247835Skib		if (unlikely(ret != 0)) {
1743247835Skib			driver->sync_obj_unref(&sync_obj);
1744247835Skib			mtx_lock(&bdev->fence_lock);
1745247835Skib			return ret;
1746247835Skib		}
1747247835Skib		mtx_lock(&bdev->fence_lock);
	/*
	 * Re-check under the lock: bo->sync_obj may have been replaced
	 * while the lock was dropped.  Only detach it if it is still the
	 * object we waited on; otherwise just drop our reference and loop.
	 */
1748247835Skib		if (likely(bo->sync_obj == sync_obj)) {
1749247835Skib			void *tmp_obj = bo->sync_obj;
1750247835Skib			bo->sync_obj = NULL;
1751255044Sjkim			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1752255044Sjkim				  &bo->priv_flags);
1753247835Skib			mtx_unlock(&bdev->fence_lock);
1754247835Skib			driver->sync_obj_unref(&sync_obj);
1755247835Skib			driver->sync_obj_unref(&tmp_obj);
1756247835Skib			mtx_lock(&bdev->fence_lock);
1757247835Skib		} else {
1758247835Skib			mtx_unlock(&bdev->fence_lock);
1759247835Skib			driver->sync_obj_unref(&sync_obj);
1760247835Skib			mtx_lock(&bdev->fence_lock);
1761247835Skib		}
1762247835Skib	}
1763247835Skib	return 0;
1764247835Skib}
1765247835Skib
/*
 * ttm_bo_synccpu_write_grab - grab the BO for CPU write access.
 *
 * Reserves the BO, waits (under fence_lock) for pending GPU work to
 * finish, and on success increments bo->cpu_writers before unreserving.
 * The counter is dropped again by ttm_bo_synccpu_write_release().
 *
 * Returns 0 on success, -EBUSY if no_wait is set and the BO is busy, or
 * an error from the reserve/wait.
 */
1766247835Skibint ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1767247835Skib{
1768247835Skib	struct ttm_bo_device *bdev = bo->bdev;
1769247835Skib	int ret = 0;
1770247835Skib
1771247835Skib	/*
1772247835Skib	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1773247835Skib	 */
1774247835Skib
1775247835Skib	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1776247835Skib	if (unlikely(ret != 0))
1777247835Skib		return ret;
	/* ttm_bo_wait() requires the fence_lock to be held. */
1778247835Skib	mtx_lock(&bdev->fence_lock);
1779247835Skib	ret = ttm_bo_wait(bo, false, true, no_wait);
1780247835Skib	mtx_unlock(&bdev->fence_lock);
1781247835Skib	if (likely(ret == 0))
1782247835Skib		atomic_inc(&bo->cpu_writers);
1783247835Skib	ttm_bo_unreserve(bo);
1784247835Skib	return ret;
1785247835Skib}
1786247835Skib
/*
 * ttm_bo_synccpu_write_release - release CPU write access.
 *
 * Drops the cpu_writers reference taken by ttm_bo_synccpu_write_grab().
 */
1787247835Skibvoid ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1788247835Skib{
1789247835Skib	atomic_dec(&bo->cpu_writers);
1790247835Skib}
1791247835Skib
1792247835Skib/**
1793247835Skib * A buffer object shrink method that tries to swap out the first
1794247835Skib * buffer object on the bo_global::swap_lru list.
1795247835Skib */
1796247835Skib
1797247835Skibstatic int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1798247835Skib{
1799247835Skib	struct ttm_bo_global *glob =
1800247835Skib	    container_of(shrink, struct ttm_bo_global, shrink);
1801247835Skib	struct ttm_buffer_object *bo;
1802247835Skib	int ret = -EBUSY;
1803247835Skib	int put_count;
	/* A BO already cached in system memory needs no move before swap. */
1804247835Skib	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1805247835Skib
	/*
	 * Scan the global swap LRU for the first BO we can reserve without
	 * blocking.  ret stays -EBUSY if every BO is already reserved.
	 */
1806247835Skib	mtx_lock(&glob->lru_lock);
1807247835Skib	list_for_each_entry(bo, &glob->swap_lru, swap) {
1808254861Sdumbbell		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
1809247835Skib		if (!ret)
1810247835Skib			break;
1811247835Skib	}
1812247835Skib
1813247835Skib	if (ret) {
1814247835Skib		mtx_unlock(&glob->lru_lock);
1815247835Skib		return ret;
1816247835Skib	}
1817247835Skib
	/* Hold a list reference while we operate outside the lru_lock. */
1818247835Skib	refcount_acquire(&bo->list_kref);
1819247835Skib
	/*
	 * BO is on the delayed-destroy list: finish destruction instead of
	 * swapping (this also drops lru_lock and the reservation).
	 */
1820247835Skib	if (!list_empty(&bo->ddestroy)) {
1821247835Skib		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1822247835Skib		if (refcount_release(&bo->list_kref))
1823247835Skib			ttm_bo_release_list(bo);
1824247835Skib		return ret;
1825247835Skib	}
1826247835Skib
1827247835Skib	put_count = ttm_bo_del_from_lru(bo);
1828247835Skib	mtx_unlock(&glob->lru_lock);
1829247835Skib
1830247835Skib	ttm_bo_list_ref_sub(bo, put_count, true);
1831247835Skib
1832247835Skib	/**
1833247835Skib	 * Wait for GPU, then move to system cached.
1834247835Skib	 */
1835247835Skib
1836247835Skib	mtx_lock(&bo->bdev->fence_lock);
1837247835Skib	ret = ttm_bo_wait(bo, false, false, false);
1838247835Skib	mtx_unlock(&bo->bdev->fence_lock);
1839247835Skib
1840247835Skib	if (unlikely(ret != 0))
1841247835Skib		goto out;
1842247835Skib
	/* Evict to cached system memory first if not already placed there. */
1843247835Skib	if ((bo->mem.placement & swap_placement) != swap_placement) {
1844247835Skib		struct ttm_mem_reg evict_mem;
1845247835Skib
1846247835Skib		evict_mem = bo->mem;
1847247835Skib		evict_mem.mm_node = NULL;
1848247835Skib		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1849247835Skib		evict_mem.mem_type = TTM_PL_SYSTEM;
1850247835Skib
1851247835Skib		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1852247835Skib					     false, false);
1853247835Skib		if (unlikely(ret != 0))
1854247835Skib			goto out;
1855247835Skib	}
1856247835Skib
1857247835Skib	ttm_bo_unmap_virtual(bo);
1858247835Skib
1859247835Skib	/**
1860247835Skib	 * Swap out. Buffer will be swapped in again as soon as
1861247835Skib	 * anyone tries to access a ttm page.
1862247835Skib	 */
1863247835Skib
1864247835Skib	if (bo->bdev->driver->swap_notify)
1865247835Skib		bo->bdev->driver->swap_notify(bo);
1866247835Skib
1867247835Skib	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1868247835Skibout:
1869247835Skib
1870247835Skib	/**
1871247835Skib	 *
1872247835Skib	 * Unreserve without putting on LRU to avoid swapping out an
1873247835Skib	 * already swapped buffer.
1874247835Skib	 */
1875247835Skib
	/* Open-coded unreserve: clear the flag and wake any waiters. */
1876247835Skib	atomic_set(&bo->reserved, 0);
1877247835Skib	wakeup(bo);
1878247835Skib	if (refcount_release(&bo->list_kref))
1879247835Skib		ttm_bo_release_list(bo);
1880247835Skib	return ret;
1881247835Skib}
1882247835Skib
1883247835Skibvoid ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1884247835Skib{
1885247835Skib	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1886247835Skib		;
1887247835Skib}
1888