1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/* Notes:
 31 *
 32 * We store a bo pointer in the drm_mm_node struct so we know which bo
 33 * owns a specific node. There is no protection on the pointer, so to
 34 * keep things from going berserk you must only access this pointer
 35 * while holding the global lru lock, and you must reset the pointer to
 36 * NULL whenever you free a node.
 37 */
38
39#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h"
41#include "ttm/ttm_placement.h"
42#include <linux/jiffies.h>
43#include <linux/slab.h>
44#include <linux/sched.h>
45#include <linux/mm.h>
46#include <linux/file.h>
47#include <linux/module.h>
48
49#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...)
51#define TTM_BO_HASH_ORDER 13
52
53static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
54static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
55static void ttm_bo_global_kobj_release(struct kobject *kobj);
56
57static struct attribute ttm_bo_count = {
58	.name = "bo_count",
59	.mode = S_IRUGO
60};
61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64	int i;
65
66	for (i = 0; i <= TTM_PL_PRIV5; i++)
67		if (flags & (1 << i)) {
68			*mem_type = i;
69			return 0;
70		}
71	return -EINVAL;
72}
73
74static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
75{
76	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
77
78	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
79	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
80	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
81	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
82	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
83	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
84		man->available_caching);
85	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
86		man->default_caching);
87	if (mem_type != TTM_PL_SYSTEM) {
88		spin_lock(&bdev->glob->lru_lock);
89		drm_mm_debug_table(&man->manager, TTM_PFX);
90		spin_unlock(&bdev->glob->lru_lock);
91	}
92}
93
94static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
95					struct ttm_placement *placement)
96{
97	int i, ret, mem_type;
98
99	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
100		bo, bo->mem.num_pages, bo->mem.size >> 10,
101		bo->mem.size >> 20);
102	for (i = 0; i < placement->num_placement; i++) {
103		ret = ttm_mem_type_from_flags(placement->placement[i],
104						&mem_type);
105		if (ret)
106			return;
107		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
108			i, placement->placement[i], mem_type);
109		ttm_mem_type_debug(bo->bdev, mem_type);
110	}
111}
112
113static ssize_t ttm_bo_global_show(struct kobject *kobj,
114				  struct attribute *attr,
115				  char *buffer)
116{
117	struct ttm_bo_global *glob =
118		container_of(kobj, struct ttm_bo_global, kobj);
119
120	return snprintf(buffer, PAGE_SIZE, "%lu\n",
121			(unsigned long) atomic_read(&glob->bo_count));
122}
123
124static struct attribute *ttm_bo_global_attrs[] = {
125	&ttm_bo_count,
126	NULL
127};
128
129static const struct sysfs_ops ttm_bo_global_ops = {
130	.show = &ttm_bo_global_show
131};
132
133static struct kobj_type ttm_bo_glob_kobj_type  = {
134	.release = &ttm_bo_global_kobj_release,
135	.sysfs_ops = &ttm_bo_global_ops,
136	.default_attrs = ttm_bo_global_attrs
137};
138
139
140static inline uint32_t ttm_bo_type_flags(unsigned type)
141{
142	return 1 << (type);
143}
144
145static void ttm_bo_release_list(struct kref *list_kref)
146{
147	struct ttm_buffer_object *bo =
148	    container_of(list_kref, struct ttm_buffer_object, list_kref);
149	struct ttm_bo_device *bdev = bo->bdev;
150
151	BUG_ON(atomic_read(&bo->list_kref.refcount));
152	BUG_ON(atomic_read(&bo->kref.refcount));
153	BUG_ON(atomic_read(&bo->cpu_writers));
154	BUG_ON(bo->sync_obj != NULL);
155	BUG_ON(bo->mem.mm_node != NULL);
156	BUG_ON(!list_empty(&bo->lru));
157	BUG_ON(!list_empty(&bo->ddestroy));
158
159	if (bo->ttm)
160		ttm_tt_destroy(bo->ttm);
161	atomic_dec(&bo->glob->bo_count);
162	if (bo->destroy)
163		bo->destroy(bo);
164	else {
165		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
166		kfree(bo);
167	}
168}
169
170int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
171{
172
173	if (interruptible) {
174		int ret = 0;
175
176		ret = wait_event_interruptible(bo->event_queue,
177					       atomic_read(&bo->reserved) == 0);
178		if (unlikely(ret != 0))
179			return ret;
180	} else {
181		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
182	}
183	return 0;
184}
185EXPORT_SYMBOL(ttm_bo_wait_unreserved);
186
187static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
188{
189	struct ttm_bo_device *bdev = bo->bdev;
190	struct ttm_mem_type_manager *man;
191
192	BUG_ON(!atomic_read(&bo->reserved));
193
194	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
195
196		BUG_ON(!list_empty(&bo->lru));
197
198		man = &bdev->man[bo->mem.mem_type];
199		list_add_tail(&bo->lru, &man->lru);
200		kref_get(&bo->list_kref);
201
202		if (bo->ttm != NULL) {
203			list_add_tail(&bo->swap, &bo->glob->swap_lru);
204			kref_get(&bo->list_kref);
205		}
206	}
207}
208
209/**
210 * Call with the lru_lock held.
211 */
212
213static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
214{
215	int put_count = 0;
216
217	if (!list_empty(&bo->swap)) {
218		list_del_init(&bo->swap);
219		++put_count;
220	}
221	if (!list_empty(&bo->lru)) {
222		list_del_init(&bo->lru);
223		++put_count;
224	}
225
226	/*
227	 * TODO: Add a driver hook to delete from
228	 * driver-specific LRU's here.
229	 */
230
231	return put_count;
232}
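
/*
 * A minimal sketch of the expected caller pattern (mirroring
 * ttm_bo_reserve() below): do the list removal under the lru lock,
 * then drop the returned number of list references only after the
 * lock has been released.
 *
 *	spin_lock(&glob->lru_lock);
 *	put_count = ttm_bo_del_from_lru(bo);
 *	spin_unlock(&glob->lru_lock);
 *	while (put_count--)
 *		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 */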
233
234int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
235			  bool interruptible,
236			  bool no_wait, bool use_sequence, uint32_t sequence)
237{
238	struct ttm_bo_global *glob = bo->glob;
239	int ret;
240
241	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
242		if (use_sequence && bo->seq_valid &&
243			(sequence - bo->val_seq < (1 << 31))) {
244			return -EAGAIN;
245		}
246
247		if (no_wait)
248			return -EBUSY;
249
250		spin_unlock(&glob->lru_lock);
251		ret = ttm_bo_wait_unreserved(bo, interruptible);
252		spin_lock(&glob->lru_lock);
253
254		if (unlikely(ret))
255			return ret;
256	}
257
258	if (use_sequence) {
259		bo->val_seq = sequence;
260		bo->seq_valid = true;
261	} else {
262		bo->seq_valid = false;
263	}
264
265	return 0;
266}
267EXPORT_SYMBOL(ttm_bo_reserve);
268
269static void ttm_bo_ref_bug(struct kref *list_kref)
270{
271	BUG();
272}
273
274int ttm_bo_reserve(struct ttm_buffer_object *bo,
275		   bool interruptible,
276		   bool no_wait, bool use_sequence, uint32_t sequence)
277{
278	struct ttm_bo_global *glob = bo->glob;
279	int put_count = 0;
280	int ret;
281
282	spin_lock(&glob->lru_lock);
283	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
284				    sequence);
285	if (likely(ret == 0))
286		put_count = ttm_bo_del_from_lru(bo);
287	spin_unlock(&glob->lru_lock);
288
289	while (put_count--)
290		kref_put(&bo->list_kref, ttm_bo_ref_bug);
291
292	return ret;
293}
294
295void ttm_bo_unreserve(struct ttm_buffer_object *bo)
296{
297	struct ttm_bo_global *glob = bo->glob;
298
299	spin_lock(&glob->lru_lock);
300	ttm_bo_add_to_lru(bo);
301	atomic_set(&bo->reserved, 0);
302	wake_up_all(&bo->event_queue);
303	spin_unlock(&glob->lru_lock);
304}
305EXPORT_SYMBOL(ttm_bo_unreserve);
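
/*
 * A minimal usage sketch for the reserve/unreserve pair, assuming the
 * caller already holds its own reference on the buffer object:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... touch bo->mem, bo->ttm, validate, etc. ...
 *	ttm_bo_unreserve(bo);
 */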
306
307/*
 308 * Call with bo->mutex held.
309 */
310static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
311{
312	struct ttm_bo_device *bdev = bo->bdev;
313	struct ttm_bo_global *glob = bo->glob;
314	int ret = 0;
315	uint32_t page_flags = 0;
316
317	TTM_ASSERT_LOCKED(&bo->mutex);
318	bo->ttm = NULL;
319
320	if (bdev->need_dma32)
321		page_flags |= TTM_PAGE_FLAG_DMA32;
322
323	switch (bo->type) {
324	case ttm_bo_type_device:
325		if (zero_alloc)
326			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
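		/* fall through: device buffers get a ttm just like kernel buffers */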
327	case ttm_bo_type_kernel:
328		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
329					page_flags, glob->dummy_read_page);
330		if (unlikely(bo->ttm == NULL))
331			ret = -ENOMEM;
332		break;
333	case ttm_bo_type_user:
334		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
335					page_flags | TTM_PAGE_FLAG_USER,
336					glob->dummy_read_page);
337		if (unlikely(bo->ttm == NULL)) {
338			ret = -ENOMEM;
339			break;
340		}
341
342		ret = ttm_tt_set_user(bo->ttm, current,
343				      bo->buffer_start, bo->num_pages);
344		if (unlikely(ret != 0))
345			ttm_tt_destroy(bo->ttm);
346		break;
347	default:
348		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
349		ret = -EINVAL;
350		break;
351	}
352
353	return ret;
354}
355
356static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
357				  struct ttm_mem_reg *mem,
358				  bool evict, bool interruptible,
359				  bool no_wait_reserve, bool no_wait_gpu)
360{
361	struct ttm_bo_device *bdev = bo->bdev;
362	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
363	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
364	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
365	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
366	int ret = 0;
367
368	if (old_is_pci || new_is_pci ||
369	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
370		ttm_bo_unmap_virtual(bo);
371
372	/*
373	 * Create and bind a ttm if required.
374	 */
375
376	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
377		ret = ttm_bo_add_ttm(bo, false);
378		if (ret)
379			goto out_err;
380
381		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
382		if (ret)
383			goto out_err;
384
385		if (mem->mem_type != TTM_PL_SYSTEM) {
386			ret = ttm_tt_bind(bo->ttm, mem);
387			if (ret)
388				goto out_err;
389		}
390
391		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
392			bo->mem = *mem;
393			mem->mm_node = NULL;
394			goto moved;
395		}
396
397	}
398
399	if (bdev->driver->move_notify)
400		bdev->driver->move_notify(bo, mem);
401
402	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
403	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
404		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
405	else if (bdev->driver->move)
406		ret = bdev->driver->move(bo, evict, interruptible,
407					 no_wait_reserve, no_wait_gpu, mem);
408	else
409		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
410
411	if (ret)
412		goto out_err;
413
414moved:
415	if (bo->evicted) {
416		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
417		if (ret)
418			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
419		bo->evicted = false;
420	}
421
422	if (bo->mem.mm_node) {
423		spin_lock(&bo->lock);
424		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
425		    bdev->man[bo->mem.mem_type].gpu_offset;
426		bo->cur_placement = bo->mem.placement;
427		spin_unlock(&bo->lock);
428	} else
429		bo->offset = 0;
430
431	return 0;
432
433out_err:
434	new_man = &bdev->man[bo->mem.mem_type];
435	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
436		ttm_tt_unbind(bo->ttm);
437		ttm_tt_destroy(bo->ttm);
438		bo->ttm = NULL;
439	}
440
441	return ret;
442}
443
444/**
 445 * Call with bo::reserved held and with the lru lock held.
 446 * Will release the GPU memory type usage on destruction.
 447 * This is the place to put driver-specific hooks.
 448 * Will release the bo::reserved lock and the
 449 * lru lock on exit.
450 */
451
452static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
453{
454	struct ttm_bo_global *glob = bo->glob;
455
456	if (bo->ttm) {
457
458		/**
459		 * Release the lru_lock, since we don't want to have
460		 * an atomic requirement on ttm_tt[unbind|destroy].
461		 */
462
463		spin_unlock(&glob->lru_lock);
464		ttm_tt_unbind(bo->ttm);
465		ttm_tt_destroy(bo->ttm);
466		bo->ttm = NULL;
467		spin_lock(&glob->lru_lock);
468	}
469
470	if (bo->mem.mm_node) {
471		drm_mm_put_block(bo->mem.mm_node);
472		bo->mem.mm_node = NULL;
473	}
474
475	atomic_set(&bo->reserved, 0);
476	wake_up_all(&bo->event_queue);
477	spin_unlock(&glob->lru_lock);
478}
479
480
481/**
482 * If bo idle, remove from delayed- and lru lists, and unref.
483 * If not idle, and already on delayed list, do nothing.
484 * If not idle, and not on delayed list, put on delayed list,
485 *   up the list_kref and schedule a delayed list check.
486 */
487
488static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
489{
490	struct ttm_bo_device *bdev = bo->bdev;
491	struct ttm_bo_global *glob = bo->glob;
492	struct ttm_bo_driver *driver = bdev->driver;
493	int ret;
494
495	spin_lock(&bo->lock);
496retry:
497	(void) ttm_bo_wait(bo, false, false, !remove_all);
498
499	if (!bo->sync_obj) {
500		int put_count;
501
502		spin_unlock(&bo->lock);
503
504		spin_lock(&glob->lru_lock);
505		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
506
507		/**
508		 * Someone else has the object reserved. Bail and retry.
509		 */
510
511		if (unlikely(ret == -EBUSY)) {
512			spin_unlock(&glob->lru_lock);
513			spin_lock(&bo->lock);
514			goto requeue;
515		}
516
517		/**
518		 * We can re-check for sync object without taking
 519		 * the bo::lock since setting the sync object also
 520		 * requires bo::reserved. A busy object at this point may
521		 * be caused by another thread starting an accelerated
522		 * eviction.
523		 */
524
525		if (unlikely(bo->sync_obj)) {
526			atomic_set(&bo->reserved, 0);
527			wake_up_all(&bo->event_queue);
528			spin_unlock(&glob->lru_lock);
529			spin_lock(&bo->lock);
530			if (remove_all)
531				goto retry;
532			else
533				goto requeue;
534		}
535
536		put_count = ttm_bo_del_from_lru(bo);
537
538		if (!list_empty(&bo->ddestroy)) {
539			list_del_init(&bo->ddestroy);
540			++put_count;
541		}
542
543		ttm_bo_cleanup_memtype_use(bo);
544
545		while (put_count--)
546			kref_put(&bo->list_kref, ttm_bo_ref_bug);
547
548		return 0;
549	}
550requeue:
551	spin_lock(&glob->lru_lock);
552	if (list_empty(&bo->ddestroy)) {
553		void *sync_obj = bo->sync_obj;
554		void *sync_obj_arg = bo->sync_obj_arg;
555
556		kref_get(&bo->list_kref);
557		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
558		spin_unlock(&glob->lru_lock);
559		spin_unlock(&bo->lock);
560
561		if (sync_obj)
562			driver->sync_obj_flush(sync_obj, sync_obj_arg);
563		schedule_delayed_work(&bdev->wq,
564				      ((HZ / 100) < 1) ? 1 : HZ / 100);
565		ret = 0;
566
567	} else {
568		spin_unlock(&glob->lru_lock);
569		spin_unlock(&bo->lock);
570		ret = -EBUSY;
571	}
572
573	return ret;
574}
575
576/**
577 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
578 * encountered buffers.
579 */
580
581static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
582{
583	struct ttm_bo_global *glob = bdev->glob;
584	struct ttm_buffer_object *entry = NULL;
585	int ret = 0;
586
587	spin_lock(&glob->lru_lock);
588	if (list_empty(&bdev->ddestroy))
589		goto out_unlock;
590
591	entry = list_first_entry(&bdev->ddestroy,
592		struct ttm_buffer_object, ddestroy);
593	kref_get(&entry->list_kref);
594
595	for (;;) {
596		struct ttm_buffer_object *nentry = NULL;
597
598		if (entry->ddestroy.next != &bdev->ddestroy) {
599			nentry = list_first_entry(&entry->ddestroy,
600				struct ttm_buffer_object, ddestroy);
601			kref_get(&nentry->list_kref);
602		}
603
604		spin_unlock(&glob->lru_lock);
605		ret = ttm_bo_cleanup_refs(entry, remove_all);
606		kref_put(&entry->list_kref, ttm_bo_release_list);
607		entry = nentry;
608
609		if (ret || !entry)
610			goto out;
611
612		spin_lock(&glob->lru_lock);
613		if (list_empty(&entry->ddestroy))
614			break;
615	}
616
617out_unlock:
618	spin_unlock(&glob->lru_lock);
619out:
620	if (entry)
621		kref_put(&entry->list_kref, ttm_bo_release_list);
622	return ret;
623}
624
625static void ttm_bo_delayed_workqueue(struct work_struct *work)
626{
627	struct ttm_bo_device *bdev =
628	    container_of(work, struct ttm_bo_device, wq.work);
629
630	if (ttm_bo_delayed_delete(bdev, false)) {
631		schedule_delayed_work(&bdev->wq,
632				      ((HZ / 100) < 1) ? 1 : HZ / 100);
633	}
634}
635
636static void ttm_bo_release(struct kref *kref)
637{
638	struct ttm_buffer_object *bo =
639	    container_of(kref, struct ttm_buffer_object, kref);
640	struct ttm_bo_device *bdev = bo->bdev;
641
642	if (likely(bo->vm_node != NULL)) {
643		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
644		drm_mm_put_block(bo->vm_node);
645		bo->vm_node = NULL;
646	}
647	write_unlock(&bdev->vm_lock);
648	ttm_bo_cleanup_refs(bo, false);
649	kref_put(&bo->list_kref, ttm_bo_release_list);
650	write_lock(&bdev->vm_lock);
651}
652
653void ttm_bo_unref(struct ttm_buffer_object **p_bo)
654{
655	struct ttm_buffer_object *bo = *p_bo;
656	struct ttm_bo_device *bdev = bo->bdev;
657
658	*p_bo = NULL;
659	write_lock(&bdev->vm_lock);
660	kref_put(&bo->kref, ttm_bo_release);
661	write_unlock(&bdev->vm_lock);
662}
663EXPORT_SYMBOL(ttm_bo_unref);
664
665int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
666{
667	return cancel_delayed_work_sync(&bdev->wq);
668}
669EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
670
671void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
672{
673	if (resched)
674		schedule_delayed_work(&bdev->wq,
675				      ((HZ / 100) < 1) ? 1 : HZ / 100);
676}
677EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
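
/*
 * A sketch of how a driver might bracket work that must not race with
 * the delayed-destroy worker; the code in between is assumed to be
 * driver-specific (suspend, reclocking, ...):
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *	... driver work ...
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */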
678
679static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
680			bool no_wait_reserve, bool no_wait_gpu)
681{
682	struct ttm_bo_device *bdev = bo->bdev;
683	struct ttm_bo_global *glob = bo->glob;
684	struct ttm_mem_reg evict_mem;
685	struct ttm_placement placement;
686	int ret = 0;
687
688	spin_lock(&bo->lock);
689	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
690	spin_unlock(&bo->lock);
691
692	if (unlikely(ret != 0)) {
693		if (ret != -ERESTARTSYS) {
694			printk(KERN_ERR TTM_PFX
695			       "Failed to expire sync object before "
696			       "buffer eviction.\n");
697		}
698		goto out;
699	}
700
701	BUG_ON(!atomic_read(&bo->reserved));
702
703	evict_mem = bo->mem;
704	evict_mem.mm_node = NULL;
705	evict_mem.bus.io_reserved = false;
706
707	placement.fpfn = 0;
708	placement.lpfn = 0;
709	placement.num_placement = 0;
710	placement.num_busy_placement = 0;
711	bdev->driver->evict_flags(bo, &placement);
712	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
713				no_wait_reserve, no_wait_gpu);
714	if (ret) {
715		if (ret != -ERESTARTSYS) {
716			printk(KERN_ERR TTM_PFX
717			       "Failed to find memory space for "
718			       "buffer 0x%p eviction.\n", bo);
719			ttm_bo_mem_space_debug(bo, &placement);
720		}
721		goto out;
722	}
723
724	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
725				     no_wait_reserve, no_wait_gpu);
726	if (ret) {
727		if (ret != -ERESTARTSYS)
728			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
729		spin_lock(&glob->lru_lock);
730		if (evict_mem.mm_node) {
731			drm_mm_put_block(evict_mem.mm_node);
732			evict_mem.mm_node = NULL;
733		}
734		spin_unlock(&glob->lru_lock);
735		goto out;
736	}
737	bo->evicted = true;
738out:
739	return ret;
740}
741
742static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
743				uint32_t mem_type,
744				bool interruptible, bool no_wait_reserve,
745				bool no_wait_gpu)
746{
747	struct ttm_bo_global *glob = bdev->glob;
748	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
749	struct ttm_buffer_object *bo;
750	int ret, put_count = 0;
751
752retry:
753	spin_lock(&glob->lru_lock);
754	if (list_empty(&man->lru)) {
755		spin_unlock(&glob->lru_lock);
756		return -EBUSY;
757	}
758
759	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
760	kref_get(&bo->list_kref);
761
762	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
763
764	if (unlikely(ret == -EBUSY)) {
765		spin_unlock(&glob->lru_lock);
766		if (likely(!no_wait_gpu))
767			ret = ttm_bo_wait_unreserved(bo, interruptible);
768
769		kref_put(&bo->list_kref, ttm_bo_release_list);
770
771		/**
772		 * We *need* to retry after releasing the lru lock.
773		 */
774
775		if (unlikely(ret != 0))
776			return ret;
777		goto retry;
778	}
779
780	put_count = ttm_bo_del_from_lru(bo);
781	spin_unlock(&glob->lru_lock);
782
783	BUG_ON(ret != 0);
784
785	while (put_count--)
786		kref_put(&bo->list_kref, ttm_bo_ref_bug);
787
788	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
789	ttm_bo_unreserve(bo);
790
791	kref_put(&bo->list_kref, ttm_bo_release_list);
792	return ret;
793}
794
795static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
796				struct ttm_mem_type_manager *man,
797				struct ttm_placement *placement,
798				struct ttm_mem_reg *mem,
799				struct drm_mm_node **node)
800{
801	struct ttm_bo_global *glob = bo->glob;
802	unsigned long lpfn;
803	int ret;
804
805	lpfn = placement->lpfn;
806	if (!lpfn)
807		lpfn = man->size;
808	*node = NULL;
809	do {
810		ret = drm_mm_pre_get(&man->manager);
811		if (unlikely(ret))
812			return ret;
813
814		spin_lock(&glob->lru_lock);
815		*node = drm_mm_search_free_in_range(&man->manager,
816					mem->num_pages, mem->page_alignment,
817					placement->fpfn, lpfn, 1);
818		if (unlikely(*node == NULL)) {
819			spin_unlock(&glob->lru_lock);
820			return 0;
821		}
822		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
823							mem->page_alignment,
824							placement->fpfn,
825							lpfn);
826		spin_unlock(&glob->lru_lock);
827	} while (*node == NULL);
828	return 0;
829}
830
831/**
832 * Repeatedly evict memory from the LRU for @mem_type until we create enough
833 * space, or we've evicted everything and there isn't enough space.
834 */
835static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
836					uint32_t mem_type,
837					struct ttm_placement *placement,
838					struct ttm_mem_reg *mem,
839					bool interruptible,
840					bool no_wait_reserve,
841					bool no_wait_gpu)
842{
843	struct ttm_bo_device *bdev = bo->bdev;
844	struct ttm_bo_global *glob = bdev->glob;
845	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
846	struct drm_mm_node *node;
847	int ret;
848
849	do {
850		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
851		if (unlikely(ret != 0))
852			return ret;
853		if (node)
854			break;
855		spin_lock(&glob->lru_lock);
856		if (list_empty(&man->lru)) {
857			spin_unlock(&glob->lru_lock);
858			break;
859		}
860		spin_unlock(&glob->lru_lock);
861		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
862						no_wait_reserve, no_wait_gpu);
863		if (unlikely(ret != 0))
864			return ret;
865	} while (1);
866	if (node == NULL)
867		return -ENOMEM;
868	mem->mm_node = node;
869	mem->mem_type = mem_type;
870	return 0;
871}
872
873static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
874				      uint32_t cur_placement,
875				      uint32_t proposed_placement)
876{
877	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
878	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
879
880	/**
881	 * Keep current caching if possible.
882	 */
883
884	if ((cur_placement & caching) != 0)
885		result |= (cur_placement & caching);
886	else if ((man->default_caching & caching) != 0)
887		result |= man->default_caching;
888	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
889		result |= TTM_PL_FLAG_CACHED;
890	else if ((TTM_PL_FLAG_WC & caching) != 0)
891		result |= TTM_PL_FLAG_WC;
892	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
893		result |= TTM_PL_FLAG_UNCACHED;
894
895	return result;
896}
897
898static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
899				 bool disallow_fixed,
900				 uint32_t mem_type,
901				 uint32_t proposed_placement,
902				 uint32_t *masked_placement)
903{
904	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
905
906	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
907		return false;
908
909	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
910		return false;
911
912	if ((proposed_placement & man->available_caching) == 0)
913		return false;
914
915	cur_flags |= (proposed_placement & man->available_caching);
916
917	*masked_placement = cur_flags;
918	return true;
919}
920
921/**
922 * Creates space for memory region @mem according to its type.
923 *
924 * This function first searches for free space in compatible memory types in
925 * the priority order defined by the driver.  If free space isn't found, then
926 * ttm_bo_mem_force_space is attempted in priority order to evict and find
927 * space.
928 */
929int ttm_bo_mem_space(struct ttm_buffer_object *bo,
930			struct ttm_placement *placement,
931			struct ttm_mem_reg *mem,
932			bool interruptible, bool no_wait_reserve,
933			bool no_wait_gpu)
934{
935	struct ttm_bo_device *bdev = bo->bdev;
936	struct ttm_mem_type_manager *man;
937	uint32_t mem_type = TTM_PL_SYSTEM;
938	uint32_t cur_flags = 0;
939	bool type_found = false;
940	bool type_ok = false;
941	bool has_erestartsys = false;
942	struct drm_mm_node *node = NULL;
943	int i, ret;
944
945	mem->mm_node = NULL;
946	for (i = 0; i < placement->num_placement; ++i) {
947		ret = ttm_mem_type_from_flags(placement->placement[i],
948						&mem_type);
949		if (ret)
950			return ret;
951		man = &bdev->man[mem_type];
952
953		type_ok = ttm_bo_mt_compatible(man,
954						bo->type == ttm_bo_type_user,
955						mem_type,
956						placement->placement[i],
957						&cur_flags);
958
959		if (!type_ok)
960			continue;
961
962		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
963						  cur_flags);
964		/*
 965		 * Copy the access and other non-mapping-related flag bits
 966		 * from the memory placement flags into the current flags.
967		 */
968		ttm_flag_masked(&cur_flags, placement->placement[i],
969				~TTM_PL_MASK_MEMTYPE);
970
971		if (mem_type == TTM_PL_SYSTEM)
972			break;
973
974		if (man->has_type && man->use_type) {
975			type_found = true;
976			ret = ttm_bo_man_get_node(bo, man, placement, mem,
977							&node);
978			if (unlikely(ret))
979				return ret;
980		}
981		if (node)
982			break;
983	}
984
985	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
986		mem->mm_node = node;
987		mem->mem_type = mem_type;
988		mem->placement = cur_flags;
989		return 0;
990	}
991
992	if (!type_found)
993		return -EINVAL;
994
995	for (i = 0; i < placement->num_busy_placement; ++i) {
996		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
997						&mem_type);
998		if (ret)
999			return ret;
1000		man = &bdev->man[mem_type];
1001		if (!man->has_type)
1002			continue;
1003		if (!ttm_bo_mt_compatible(man,
1004						bo->type == ttm_bo_type_user,
1005						mem_type,
1006						placement->busy_placement[i],
1007						&cur_flags))
1008			continue;
1009
1010		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1011						  cur_flags);
1012		/*
 1013		 * Copy the access and other non-mapping-related flag bits
 1014		 * from the memory placement flags into the current flags.
1015		 */
1016		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
1017				~TTM_PL_MASK_MEMTYPE);
1018
1019
1020		if (mem_type == TTM_PL_SYSTEM) {
1021			mem->mem_type = mem_type;
1022			mem->placement = cur_flags;
1023			mem->mm_node = NULL;
1024			return 0;
1025		}
1026
1027		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1028						interruptible, no_wait_reserve, no_wait_gpu);
1029		if (ret == 0 && mem->mm_node) {
1030			mem->placement = cur_flags;
1031			return 0;
1032		}
1033		if (ret == -ERESTARTSYS)
1034			has_erestartsys = true;
1035	}
1036	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1037	return ret;
1038}
1039EXPORT_SYMBOL(ttm_bo_mem_space);
1040
1041int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1042{
1043	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1044		return -EBUSY;
1045
1046	return wait_event_interruptible(bo->event_queue,
1047					atomic_read(&bo->cpu_writers) == 0);
1048}
1049EXPORT_SYMBOL(ttm_bo_wait_cpu);
1050
1051int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1052			struct ttm_placement *placement,
1053			bool interruptible, bool no_wait_reserve,
1054			bool no_wait_gpu)
1055{
1056	struct ttm_bo_global *glob = bo->glob;
1057	int ret = 0;
1058	struct ttm_mem_reg mem;
1059
1060	BUG_ON(!atomic_read(&bo->reserved));
1061
1062	spin_lock(&bo->lock);
1063	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1064	spin_unlock(&bo->lock);
1065	if (ret)
1066		return ret;
1067	mem.num_pages = bo->num_pages;
1068	mem.size = mem.num_pages << PAGE_SHIFT;
1069	mem.page_alignment = bo->mem.page_alignment;
1070	mem.bus.io_reserved = false;
1071	/*
1072	 * Determine where to move the buffer.
1073	 */
1074	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1075	if (ret)
1076		goto out_unlock;
1077	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1078out_unlock:
1079	if (ret && mem.mm_node) {
1080		spin_lock(&glob->lru_lock);
1081		drm_mm_put_block(mem.mm_node);
1082		spin_unlock(&glob->lru_lock);
1083	}
1084	return ret;
1085}
1086
1087static int ttm_bo_mem_compat(struct ttm_placement *placement,
1088			     struct ttm_mem_reg *mem)
1089{
1090	int i;
1091	struct drm_mm_node *node = mem->mm_node;
1092
1093	if (node && placement->lpfn != 0 &&
1094	    (node->start < placement->fpfn ||
1095	     node->start + node->size > placement->lpfn))
1096		return -1;
1097
1098	for (i = 0; i < placement->num_placement; i++) {
1099		if ((placement->placement[i] & mem->placement &
1100			TTM_PL_MASK_CACHING) &&
1101			(placement->placement[i] & mem->placement &
1102			TTM_PL_MASK_MEM))
1103			return i;
1104	}
1105	return -1;
1106}
1107
1108int ttm_bo_validate(struct ttm_buffer_object *bo,
1109			struct ttm_placement *placement,
1110			bool interruptible, bool no_wait_reserve,
1111			bool no_wait_gpu)
1112{
1113	int ret;
1114
1115	BUG_ON(!atomic_read(&bo->reserved));
1116	/* Check that range is valid */
1117	if (placement->lpfn || placement->fpfn)
1118		if (placement->fpfn > placement->lpfn ||
1119			(placement->lpfn - placement->fpfn) < bo->num_pages)
1120			return -EINVAL;
1121	/*
1122	 * Check whether we need to move buffer.
1123	 */
1124	ret = ttm_bo_mem_compat(placement, &bo->mem);
1125	if (ret < 0) {
1126		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1127		if (ret)
1128			return ret;
1129	} else {
1130		/*
 1131		 * Copy the access and other non-mapping-related flag bits
 1132		 * from the compatible memory placement flags into the active flags.
1133		 */
1134		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1135				~TTM_PL_MASK_MEMTYPE);
1136	}
1137	/*
1138	 * We might need to add a TTM.
1139	 */
1140	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1141		ret = ttm_bo_add_ttm(bo, true);
1142		if (ret)
1143			return ret;
1144	}
1145	return 0;
1146}
1147EXPORT_SYMBOL(ttm_bo_validate);
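
/*
 * A minimal sketch of driver-side revalidation with ttm_bo_validate():
 * build a ttm_placement describing where the buffer may live and
 * validate a reserved buffer against it. The single-entry placement
 * array and the chosen flags are illustrative assumptions.
 *
 *	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &flags,
 *		.num_busy_placement = 1,
 *		.busy_placement = &flags,
 *	};
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (likely(ret == 0)) {
 *		ret = ttm_bo_validate(bo, &placement, true, false, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */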
1148
1149int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1150				struct ttm_placement *placement)
1151{
1152	int i;
1153
1154	if (placement->fpfn || placement->lpfn) {
1155		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
 1156			printk(KERN_ERR TTM_PFX "Page number range too small. "
1157				"Need %lu pages, range is [%u, %u]\n",
1158				bo->mem.num_pages, placement->fpfn,
1159				placement->lpfn);
1160			return -EINVAL;
1161		}
1162	}
1163	for (i = 0; i < placement->num_placement; i++) {
1164		if (!capable(CAP_SYS_ADMIN)) {
1165			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1166				printk(KERN_ERR TTM_PFX "Need to be root to "
1167					"modify NO_EVICT status.\n");
1168				return -EINVAL;
1169			}
1170		}
1171	}
1172	for (i = 0; i < placement->num_busy_placement; i++) {
1173		if (!capable(CAP_SYS_ADMIN)) {
1174			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1175				printk(KERN_ERR TTM_PFX "Need to be root to "
1176					"modify NO_EVICT status.\n");
1177				return -EINVAL;
1178			}
1179		}
1180	}
1181	return 0;
1182}
1183
1184int ttm_bo_init(struct ttm_bo_device *bdev,
1185		struct ttm_buffer_object *bo,
1186		unsigned long size,
1187		enum ttm_bo_type type,
1188		struct ttm_placement *placement,
1189		uint32_t page_alignment,
1190		unsigned long buffer_start,
1191		bool interruptible,
1192		struct file *persistant_swap_storage,
1193		size_t acc_size,
1194		void (*destroy) (struct ttm_buffer_object *))
1195{
1196	int ret = 0;
1197	unsigned long num_pages;
1198
1199	size += buffer_start & ~PAGE_MASK;
1200	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1201	if (num_pages == 0) {
1202		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1203		return -EINVAL;
1204	}
1205	bo->destroy = destroy;
1206
1207	spin_lock_init(&bo->lock);
1208	kref_init(&bo->kref);
1209	kref_init(&bo->list_kref);
1210	atomic_set(&bo->cpu_writers, 0);
1211	atomic_set(&bo->reserved, 1);
1212	init_waitqueue_head(&bo->event_queue);
1213	INIT_LIST_HEAD(&bo->lru);
1214	INIT_LIST_HEAD(&bo->ddestroy);
1215	INIT_LIST_HEAD(&bo->swap);
1216	bo->bdev = bdev;
1217	bo->glob = bdev->glob;
1218	bo->type = type;
1219	bo->num_pages = num_pages;
1220	bo->mem.size = num_pages << PAGE_SHIFT;
1221	bo->mem.mem_type = TTM_PL_SYSTEM;
1222	bo->mem.num_pages = bo->num_pages;
1223	bo->mem.mm_node = NULL;
1224	bo->mem.page_alignment = page_alignment;
1225	bo->mem.bus.io_reserved = false;
1226	bo->buffer_start = buffer_start & PAGE_MASK;
1227	bo->priv_flags = 0;
1228	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1229	bo->seq_valid = false;
1230	bo->persistant_swap_storage = persistant_swap_storage;
1231	bo->acc_size = acc_size;
1232	atomic_inc(&bo->glob->bo_count);
1233
1234	ret = ttm_bo_check_placement(bo, placement);
1235	if (unlikely(ret != 0))
1236		goto out_err;
1237
1238	/*
1239	 * For ttm_bo_type_device buffers, allocate
1240	 * address space from the device.
1241	 */
1242	if (bo->type == ttm_bo_type_device) {
1243		ret = ttm_bo_setup_vm(bo);
1244		if (ret)
1245			goto out_err;
1246	}
1247
1248	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1249	if (ret)
1250		goto out_err;
1251
1252	ttm_bo_unreserve(bo);
1253	return 0;
1254
1255out_err:
1256	ttm_bo_unreserve(bo);
1257	ttm_bo_unref(&bo);
1258
1259	return ret;
1260}
1261EXPORT_SYMBOL(ttm_bo_init);
1262
1263static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1264				 unsigned long num_pages)
1265{
1266	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1267	    PAGE_MASK;
1268
1269	return glob->ttm_bo_size + 2 * page_array_size;
1270}
1271
1272int ttm_bo_create(struct ttm_bo_device *bdev,
1273			unsigned long size,
1274			enum ttm_bo_type type,
1275			struct ttm_placement *placement,
1276			uint32_t page_alignment,
1277			unsigned long buffer_start,
1278			bool interruptible,
1279			struct file *persistant_swap_storage,
1280			struct ttm_buffer_object **p_bo)
1281{
1282	struct ttm_buffer_object *bo;
1283	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1284	int ret;
1285
1286	size_t acc_size =
1287	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1288	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1289	if (unlikely(ret != 0))
1290		return ret;
1291
1292	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1293
1294	if (unlikely(bo == NULL)) {
1295		ttm_mem_global_free(mem_glob, acc_size);
1296		return -ENOMEM;
1297	}
1298
1299	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1300				buffer_start, interruptible,
1301				persistant_swap_storage, acc_size, NULL);
1302	if (likely(ret == 0))
1303		*p_bo = bo;
1304
1305	return ret;
1306}
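
/*
 * A sketch of creating and releasing a kernel-internal buffer object
 * with ttm_bo_create(), given a ttm_placement like the one sketched
 * after ttm_bo_validate() above; error handling is driver-specific:
 *
 *	struct ttm_buffer_object *bo;
 *
 *	ret = ttm_bo_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
 *			    &placement, 0, 0, false, NULL, &bo);
 *	if (likely(ret == 0)) {
 *		... use the buffer ...
 *		ttm_bo_unref(&bo);
 *	}
 */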
1307
1308static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1309					unsigned mem_type, bool allow_errors)
1310{
1311	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1312	struct ttm_bo_global *glob = bdev->glob;
1313	int ret;
1314
1315	/*
1316	 * Can't use standard list traversal since we're unlocking.
1317	 */
1318
1319	spin_lock(&glob->lru_lock);
1320	while (!list_empty(&man->lru)) {
1321		spin_unlock(&glob->lru_lock);
1322		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1323		if (ret) {
1324			if (allow_errors) {
1325				return ret;
1326			} else {
1327				printk(KERN_ERR TTM_PFX
1328					"Cleanup eviction failed\n");
1329			}
1330		}
1331		spin_lock(&glob->lru_lock);
1332	}
1333	spin_unlock(&glob->lru_lock);
1334	return 0;
1335}
1336
1337int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1338{
1339	struct ttm_bo_global *glob = bdev->glob;
1340	struct ttm_mem_type_manager *man;
1341	int ret = -EINVAL;
1342
1343	if (mem_type >= TTM_NUM_MEM_TYPES) {
1344		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1345		return ret;
1346	}
1347	man = &bdev->man[mem_type];
1348
1349	if (!man->has_type) {
1350		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1351		       "memory manager type %u\n", mem_type);
1352		return ret;
1353	}
1354
1355	man->use_type = false;
1356	man->has_type = false;
1357
1358	ret = 0;
1359	if (mem_type > 0) {
1360		ttm_bo_force_list_clean(bdev, mem_type, false);
1361
1362		spin_lock(&glob->lru_lock);
1363		if (drm_mm_clean(&man->manager))
1364			drm_mm_takedown(&man->manager);
1365		else
1366			ret = -EBUSY;
1367
1368		spin_unlock(&glob->lru_lock);
1369	}
1370
1371	return ret;
1372}
1373EXPORT_SYMBOL(ttm_bo_clean_mm);
1374
1375int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1376{
1377	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1378
1379	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1380		printk(KERN_ERR TTM_PFX
1381		       "Illegal memory manager memory type %u.\n",
1382		       mem_type);
1383		return -EINVAL;
1384	}
1385
1386	if (!man->has_type) {
1387		printk(KERN_ERR TTM_PFX
1388		       "Memory type %u has not been initialized.\n",
1389		       mem_type);
1390		return 0;
1391	}
1392
1393	return ttm_bo_force_list_clean(bdev, mem_type, true);
1394}
1395EXPORT_SYMBOL(ttm_bo_evict_mm);
1396
1397int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1398			unsigned long p_size)
1399{
1400	int ret = -EINVAL;
1401	struct ttm_mem_type_manager *man;
1402
1403	if (type >= TTM_NUM_MEM_TYPES) {
1404		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1405		return ret;
1406	}
1407
1408	man = &bdev->man[type];
1409	if (man->has_type) {
1410		printk(KERN_ERR TTM_PFX
1411		       "Memory manager already initialized for type %d\n",
1412		       type);
1413		return ret;
1414	}
1415
1416	ret = bdev->driver->init_mem_type(bdev, type, man);
1417	if (ret)
1418		return ret;
1419
1420	ret = 0;
1421	if (type != TTM_PL_SYSTEM) {
1422		if (!p_size) {
1423			printk(KERN_ERR TTM_PFX
1424			       "Zero size memory manager type %d\n",
1425			       type);
1426			return ret;
1427		}
1428		ret = drm_mm_init(&man->manager, 0, p_size);
1429		if (ret)
1430			return ret;
1431	}
1432	man->has_type = true;
1433	man->use_type = true;
1434	man->size = p_size;
1435
1436	INIT_LIST_HEAD(&man->lru);
1437
1438	return 0;
1439}
1440EXPORT_SYMBOL(ttm_bo_init_mm);
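
/*
 * A sketch of how a driver brings up an additional, fixed-size memory
 * type after ttm_bo_device_init(); TTM_PL_VRAM and the size here are
 * driver-side assumptions, not defined in this file:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 */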
1441
1442static void ttm_bo_global_kobj_release(struct kobject *kobj)
1443{
1444	struct ttm_bo_global *glob =
1445		container_of(kobj, struct ttm_bo_global, kobj);
1446
1447	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1448	__free_page(glob->dummy_read_page);
1449	kfree(glob);
1450}
1451
1452void ttm_bo_global_release(struct drm_global_reference *ref)
1453{
1454	struct ttm_bo_global *glob = ref->object;
1455
1456	kobject_del(&glob->kobj);
1457	kobject_put(&glob->kobj);
1458}
1459EXPORT_SYMBOL(ttm_bo_global_release);
1460
1461int ttm_bo_global_init(struct drm_global_reference *ref)
1462{
1463	struct ttm_bo_global_ref *bo_ref =
1464		container_of(ref, struct ttm_bo_global_ref, ref);
1465	struct ttm_bo_global *glob = ref->object;
1466	int ret;
1467
1468	mutex_init(&glob->device_list_mutex);
1469	spin_lock_init(&glob->lru_lock);
1470	glob->mem_glob = bo_ref->mem_glob;
1471	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1472
1473	if (unlikely(glob->dummy_read_page == NULL)) {
1474		ret = -ENOMEM;
1475		goto out_no_drp;
1476	}
1477
1478	INIT_LIST_HEAD(&glob->swap_lru);
1479	INIT_LIST_HEAD(&glob->device_list);
1480
1481	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1482	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1483	if (unlikely(ret != 0)) {
1484		printk(KERN_ERR TTM_PFX
1485		       "Could not register buffer object swapout.\n");
1486		goto out_no_shrink;
1487	}
1488
1489	glob->ttm_bo_extra_size =
1490		ttm_round_pot(sizeof(struct ttm_tt)) +
1491		ttm_round_pot(sizeof(struct ttm_backend));
1492
1493	glob->ttm_bo_size = glob->ttm_bo_extra_size +
1494		ttm_round_pot(sizeof(struct ttm_buffer_object));
1495
1496	atomic_set(&glob->bo_count, 0);
1497
1498	ret = kobject_init_and_add(
1499		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1500	if (unlikely(ret != 0))
1501		kobject_put(&glob->kobj);
1502	return ret;
1503out_no_shrink:
1504	__free_page(glob->dummy_read_page);
1505out_no_drp:
1506	kfree(glob);
1507	return ret;
1508}
1509EXPORT_SYMBOL(ttm_bo_global_init);
1510
1511
1512int ttm_bo_device_release(struct ttm_bo_device *bdev)
1513{
1514	int ret = 0;
1515	unsigned i = TTM_NUM_MEM_TYPES;
1516	struct ttm_mem_type_manager *man;
1517	struct ttm_bo_global *glob = bdev->glob;
1518
1519	while (i--) {
1520		man = &bdev->man[i];
1521		if (man->has_type) {
1522			man->use_type = false;
1523			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1524				ret = -EBUSY;
1525				printk(KERN_ERR TTM_PFX
1526				       "DRM memory manager type %d "
1527				       "is not clean.\n", i);
1528			}
1529			man->has_type = false;
1530		}
1531	}
1532
1533	mutex_lock(&glob->device_list_mutex);
1534	list_del(&bdev->device_list);
1535	mutex_unlock(&glob->device_list_mutex);
1536
1537	if (!cancel_delayed_work(&bdev->wq))
1538		flush_scheduled_work();
1539
1540	while (ttm_bo_delayed_delete(bdev, true))
1541		;
1542
1543	spin_lock(&glob->lru_lock);
1544	if (list_empty(&bdev->ddestroy))
1545		TTM_DEBUG("Delayed destroy list was clean\n");
1546
1547	if (list_empty(&bdev->man[0].lru))
1548		TTM_DEBUG("Swap list was clean\n");
1549	spin_unlock(&glob->lru_lock);
1550
1551	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1552	write_lock(&bdev->vm_lock);
1553	drm_mm_takedown(&bdev->addr_space_mm);
1554	write_unlock(&bdev->vm_lock);
1555
1556	return ret;
1557}
1558EXPORT_SYMBOL(ttm_bo_device_release);
1559
1560int ttm_bo_device_init(struct ttm_bo_device *bdev,
1561		       struct ttm_bo_global *glob,
1562		       struct ttm_bo_driver *driver,
1563		       uint64_t file_page_offset,
1564		       bool need_dma32)
1565{
1566	int ret = -EINVAL;
1567
1568	rwlock_init(&bdev->vm_lock);
1569	bdev->driver = driver;
1570
1571	memset(bdev->man, 0, sizeof(bdev->man));
1572
1573	/*
1574	 * Initialize the system memory buffer type.
1575	 * Other types need to be driver / IOCTL initialized.
1576	 */
1577	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1578	if (unlikely(ret != 0))
1579		goto out_no_sys;
1580
1581	bdev->addr_space_rb = RB_ROOT;
1582	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1583	if (unlikely(ret != 0))
1584		goto out_no_addr_mm;
1585
1586	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1587	bdev->nice_mode = true;
1588	INIT_LIST_HEAD(&bdev->ddestroy);
1589	bdev->dev_mapping = NULL;
1590	bdev->glob = glob;
1591	bdev->need_dma32 = need_dma32;
1592
1593	mutex_lock(&glob->device_list_mutex);
1594	list_add_tail(&bdev->device_list, &glob->device_list);
1595	mutex_unlock(&glob->device_list_mutex);
1596
1597	return 0;
1598out_no_addr_mm:
1599	ttm_bo_clean_mm(bdev, 0);
1600out_no_sys:
1601	return ret;
1602}
1603EXPORT_SYMBOL(ttm_bo_device_init);
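
/*
 * A sketch of driver-side device setup, assuming the driver already
 * holds a ttm_bo_global reference and supplies its own ttm_bo_driver
 * ops table; my_driver_ops and MY_FILE_PAGE_OFFSET are illustrative:
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev, glob, &my_driver_ops,
 *				 MY_FILE_PAGE_OFFSET, need_dma32);
 *	if (ret)
 *		return ret;
 */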
1604
1605/*
1606 * buffer object vm functions.
1607 */
1608
1609bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1610{
1611	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1612
1613	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1614		if (mem->mem_type == TTM_PL_SYSTEM)
1615			return false;
1616
1617		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1618			return false;
1619
1620		if (mem->placement & TTM_PL_FLAG_CACHED)
1621			return false;
1622	}
1623	return true;
1624}
1625
1626void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1627{
1628	struct ttm_bo_device *bdev = bo->bdev;
1629	loff_t offset = (loff_t) bo->addr_space_offset;
1630	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1631
1632	if (!bdev->dev_mapping)
1633		return;
1634	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1635	ttm_mem_io_free(bdev, &bo->mem);
1636}
1637EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1638
1639static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1640{
1641	struct ttm_bo_device *bdev = bo->bdev;
1642	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1643	struct rb_node *parent = NULL;
1644	struct ttm_buffer_object *cur_bo;
1645	unsigned long offset = bo->vm_node->start;
1646	unsigned long cur_offset;
1647
1648	while (*cur) {
1649		parent = *cur;
1650		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1651		cur_offset = cur_bo->vm_node->start;
1652		if (offset < cur_offset)
1653			cur = &parent->rb_left;
1654		else if (offset > cur_offset)
1655			cur = &parent->rb_right;
1656		else
1657			BUG();
1658	}
1659
1660	rb_link_node(&bo->vm_rb, parent, cur);
1661	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1662}
1663
1664/**
1665 * ttm_bo_setup_vm:
1666 *
1667 * @bo: the buffer to allocate address space for
1668 *
1669 * Allocate address space in the drm device so that applications
1670 * can mmap the buffer and access the contents. This only
1671 * applies to ttm_bo_type_device objects as others are not
1672 * placed in the drm device address space.
1673 */
1674
1675static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1676{
1677	struct ttm_bo_device *bdev = bo->bdev;
1678	int ret;
1679
1680retry_pre_get:
1681	ret = drm_mm_pre_get(&bdev->addr_space_mm);
1682	if (unlikely(ret != 0))
1683		return ret;
1684
1685	write_lock(&bdev->vm_lock);
1686	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1687					 bo->mem.num_pages, 0, 0);
1688
1689	if (unlikely(bo->vm_node == NULL)) {
1690		ret = -ENOMEM;
1691		goto out_unlock;
1692	}
1693
1694	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1695					      bo->mem.num_pages, 0);
1696
1697	if (unlikely(bo->vm_node == NULL)) {
1698		write_unlock(&bdev->vm_lock);
1699		goto retry_pre_get;
1700	}
1701
1702	ttm_bo_vm_insert_rb(bo);
1703	write_unlock(&bdev->vm_lock);
1704	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1705
1706	return 0;
1707out_unlock:
1708	write_unlock(&bdev->vm_lock);
1709	return ret;
1710}
1711
1712int ttm_bo_wait(struct ttm_buffer_object *bo,
1713		bool lazy, bool interruptible, bool no_wait)
1714{
1715	struct ttm_bo_driver *driver = bo->bdev->driver;
1716	void *sync_obj;
1717	void *sync_obj_arg;
1718	int ret = 0;
1719
1720	if (likely(bo->sync_obj == NULL))
1721		return 0;
1722
1723	while (bo->sync_obj) {
1724
1725		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1726			void *tmp_obj = bo->sync_obj;
1727			bo->sync_obj = NULL;
1728			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1729			spin_unlock(&bo->lock);
1730			driver->sync_obj_unref(&tmp_obj);
1731			spin_lock(&bo->lock);
1732			continue;
1733		}
1734
1735		if (no_wait)
1736			return -EBUSY;
1737
1738		sync_obj = driver->sync_obj_ref(bo->sync_obj);
1739		sync_obj_arg = bo->sync_obj_arg;
1740		spin_unlock(&bo->lock);
1741		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1742					    lazy, interruptible);
1743		if (unlikely(ret != 0)) {
1744			driver->sync_obj_unref(&sync_obj);
1745			spin_lock(&bo->lock);
1746			return ret;
1747		}
1748		spin_lock(&bo->lock);
1749		if (likely(bo->sync_obj == sync_obj &&
1750			   bo->sync_obj_arg == sync_obj_arg)) {
1751			void *tmp_obj = bo->sync_obj;
1752			bo->sync_obj = NULL;
1753			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1754				  &bo->priv_flags);
1755			spin_unlock(&bo->lock);
1756			driver->sync_obj_unref(&sync_obj);
1757			driver->sync_obj_unref(&tmp_obj);
1758			spin_lock(&bo->lock);
1759		} else {
1760			spin_unlock(&bo->lock);
1761			driver->sync_obj_unref(&sync_obj);
1762			spin_lock(&bo->lock);
1763		}
1764	}
1765	return 0;
1766}
1767EXPORT_SYMBOL(ttm_bo_wait);
1768
1769int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1770{
1771	int ret = 0;
1772
1773	/*
1774	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1775	 */
1776
1777	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1778	if (unlikely(ret != 0))
1779		return ret;
1780	spin_lock(&bo->lock);
1781	ret = ttm_bo_wait(bo, false, true, no_wait);
1782	spin_unlock(&bo->lock);
1783	if (likely(ret == 0))
1784		atomic_inc(&bo->cpu_writers);
1785	ttm_bo_unreserve(bo);
1786	return ret;
1787}
1788EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1789
1790void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1791{
1792	if (atomic_dec_and_test(&bo->cpu_writers))
1793		wake_up_all(&bo->event_queue);
1794}
1795EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
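
/*
 * A minimal sketch of bracketing a CPU write with the synccpu helpers;
 * the access in between is assumed to be driver or user mapping code:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... CPU writes to the buffer contents ...
 *	ttm_bo_synccpu_write_release(bo);
 */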
1796
1797/**
1798 * A buffer object shrink method that tries to swap out the first
1799 * buffer object on the bo_global::swap_lru list.
1800 */
1801
1802static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1803{
1804	struct ttm_bo_global *glob =
1805	    container_of(shrink, struct ttm_bo_global, shrink);
1806	struct ttm_buffer_object *bo;
1807	int ret = -EBUSY;
1808	int put_count;
1809	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1810
1811	spin_lock(&glob->lru_lock);
1812	while (ret == -EBUSY) {
1813		if (unlikely(list_empty(&glob->swap_lru))) {
1814			spin_unlock(&glob->lru_lock);
1815			return -EBUSY;
1816		}
1817
1818		bo = list_first_entry(&glob->swap_lru,
1819				      struct ttm_buffer_object, swap);
1820		kref_get(&bo->list_kref);
1821
1822		/**
1823		 * Reserve buffer. Since we unlock while sleeping, we need
1824		 * to re-check that nobody removed us from the swap-list while
1825		 * we slept.
1826		 */
1827
1828		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1829		if (unlikely(ret == -EBUSY)) {
1830			spin_unlock(&glob->lru_lock);
1831			ttm_bo_wait_unreserved(bo, false);
1832			kref_put(&bo->list_kref, ttm_bo_release_list);
1833			spin_lock(&glob->lru_lock);
1834		}
1835	}
1836
1837	BUG_ON(ret != 0);
1838	put_count = ttm_bo_del_from_lru(bo);
1839	spin_unlock(&glob->lru_lock);
1840
1841	while (put_count--)
1842		kref_put(&bo->list_kref, ttm_bo_ref_bug);
1843
1844	/**
1845	 * Wait for GPU, then move to system cached.
1846	 */
1847
1848	spin_lock(&bo->lock);
1849	ret = ttm_bo_wait(bo, false, false, false);
1850	spin_unlock(&bo->lock);
1851
1852	if (unlikely(ret != 0))
1853		goto out;
1854
1855	if ((bo->mem.placement & swap_placement) != swap_placement) {
1856		struct ttm_mem_reg evict_mem;
1857
1858		evict_mem = bo->mem;
1859		evict_mem.mm_node = NULL;
1860		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1861		evict_mem.mem_type = TTM_PL_SYSTEM;
1862
1863		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1864					     false, false, false);
1865		if (unlikely(ret != 0))
1866			goto out;
1867	}
1868
1869	ttm_bo_unmap_virtual(bo);
1870
1871	/**
1872	 * Swap out. Buffer will be swapped in again as soon as
1873	 * anyone tries to access a ttm page.
1874	 */
1875
1876	if (bo->bdev->driver->swap_notify)
1877		bo->bdev->driver->swap_notify(bo);
1878
1879	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1880out:
1881
1882	/**
1883	 *
1884	 * Unreserve without putting on LRU to avoid swapping out an
1885	 * already swapped buffer.
1886	 */
1887
1888	atomic_set(&bo->reserved, 0);
1889	wake_up_all(&bo->event_queue);
1890	kref_put(&bo->list_kref, ttm_bo_release_list);
1891	return ret;
1892}
1893
1894void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1895{
1896	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1897		;
1898}
1899EXPORT_SYMBOL(ttm_bo_swapout_all);
1900