/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

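/*
 * Return the oldest object from the render and BSD active lists combined,
 * advancing whichever iterator yielded it. Each per-ring list is ordered
 * by increasing last_rendering_seqno, so comparing the two list heads
 * produces a merged walk over both rings in overall retirement order.
 * Returns NULL once both iterators have reached the end of their lists.
 */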
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
			    struct list_head **render_iter,
			    struct list_head **bsd_iter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

	if (*render_iter != &dev_priv->render_ring.active_list)
		render_obj = list_entry(*render_iter,
					struct drm_i915_gem_object,
					list);

	if (HAS_BSD(dev)) {
		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
			bsd_obj = list_entry(*bsd_iter,
					     struct drm_i915_gem_object,
					     list);

		if (render_obj == NULL) {
			*bsd_iter = (*bsd_iter)->next;
			return bsd_obj;
		}

		if (bsd_obj == NULL) {
			*render_iter = (*render_iter)->next;
			return render_obj;
		}

		/* Both rings have active objects: return the older one */
		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
			*render_iter = (*render_iter)->next;
			return render_obj;
		} else {
			*bsd_iter = (*bsd_iter)->next;
			return bsd_obj;
		}
	} else {
		*render_iter = (*render_iter)->next;
		return render_obj;
	}
}

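/*
 * Add the object to the temporary unwind list, holding a reference so it
 * survives until the scan is unwound, and offer its GTT node to the
 * drm_mm scanner. Returns true once the scanner has collected enough
 * blocks to form a suitably sized hole.
 */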
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
	   struct list_head *unwind)
{
	list_add(&obj_priv->evict_list, unwind);
	drm_gem_object_reference(&obj_priv->base);
	return drm_mm_scan_add_block(obj_priv->gtt_space);
}

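/*
 * Iterate over the render and BSD active lists simultaneously, visiting
 * objects oldest-first via i915_gem_next_active_object(). R and B are
 * pointers to the per-ring list iterators.
 */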
#define i915_for_each_active_object(OBJ, R, B) \
	*(R) = dev_priv->render_ring.active_list.next; \
	*(B) = dev_priv->bsd_ring.active_list.next; \
	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)

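/*
 * Try to make room in the GTT: retire completed work, then scan for a
 * set of objects that, once evicted in LRU order, opens a contiguous
 * hole of at least min_size bytes at the requested alignment. Returns 0
 * on success and -ENOSPC if no suitable set of objects was found.
 */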
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj_priv;
	struct list_head *render_iter, *bsd_iter;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
			       min_size, alignment, 0))
		return 0;

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
		/* Does the object require an outstanding flush? */
		if (obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Finally add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
		if (obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* ... and then the dirty objects still active on the rings */
	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
		if (!obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
		BUG_ON(ret);
		drm_gem_object_unreference(&obj_priv->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj_priv = list_first_entry(&unwind_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
			list_move(&obj_priv->evict_list, &eviction_list);
			continue;
		}
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj_priv = list_first_entry(&eviction_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(&obj_priv->base);
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
	}

	return ret;
}

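/*
 * Empty the GTT of all evictable objects: wait for the GPU to go idle,
 * which flushes and retires all outstanding rendering, then unbind the
 * whole inactive list. Returns -ENOSPC if there was nothing to evict.
 */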
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_inactive(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}