/* i915_gem_evict.c, revision 1.4 */
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
	i915_gem_object_set_tiling_quirk(obj);
	list_add(&obj->st_link, objects);
}
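
/*
 * A minimal sketch of the quirk helpers assumed above (the real definitions
 * live in gem/i915_gem_object.h; shown here only to make the ownership trick
 * legible, not as the canonical implementation):
 *
 *	static inline bool
 *	i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
 *	{
 *		return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
 *	}
 *
 * with matching set/clear wrappers around set_bit()/clear_bit(). The
 * selftest repurposes the bit purely as a marker for objects it owns on its
 * private list.
 */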

static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	unsigned long count;

	count = 0;
	do {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

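		/*
		 * Assumed argument layout:
		 * i915_gem_object_ggtt_pin(obj, view, size, alignment, flags).
		 * A NULL view with zero size/alignment/flags binds the whole
		 * object wherever it fits in the GGTT.
		 */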
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			if (vma == ERR_PTR(-ENOSPC))
				break;

			return PTR_ERR(vma);
		}

		quirk_add(obj, objects);
		count++;
	} while (1);
	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
		 count, ggtt->vm.total / PAGE_SIZE);

	if (list_empty(&ggtt->vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

static void unpin_ggtt(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_gem_object_has_tiling_quirk(vma->obj))
			i915_vma_unpin(vma);
}

static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_clear_tiling_quirk(obj);
		i915_gem_object_put(obj);
	}

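	/*
	 * Object frees are deferred to a worker; drain it so that teardown
	 * is deterministic before the next subtest repopulates the GGTT.
	 */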
	i915_gem_drain_freed_objects(ggtt->vm.i915);
}

static int igt_evict_something(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	DRM_LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

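	/*
	 * The argument layout assumed below is
	 * i915_gem_evict_something(vm, ww, min_size, alignment, color,
	 * start, end, flags) with a NULL ww context, i.e. scan the whole
	 * address space for any hole of at least one GTT page.
	 */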
	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm, NULL,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	/* Everything is unpinned, we should be able to evict something */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm, NULL,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	DRM_LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (vma != ERR_PTR(-ENOSPC)) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = 4096,
	};
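	/*
	 * Note: target is never inserted into the drm_mm;
	 * i915_gem_evict_for_node() only reads its start/size (and colour)
	 * as a description of the range [start, start + size) to be vacated.
	 */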
	DRM_LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	/* Everything is unpinned, we should be able to evict the node */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
}
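
/*
 * An empty adjust callback suffices here: installing any non-NULL
 * mm.color_adjust convinces the vm that colours matter, and this test only
 * relies on colour mismatches being noticed by the evictor, never on the
 * reserved ranges actually being widened by guard pages.
 */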

static int igt_evict_for_cache_color(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	DRM_LIST_HEAD(objects);
	int err;

	/*
	 * Currently the use of color_adjust for the GGTT is limited to cache
	 * coloring and guard pages, and so the presence of mm.color_adjust for
	 * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
	 * color adjust will work just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;
	GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
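
	/*
	 * GGTT layout constructed below (in units of I915_GTT_PAGE_SIZE):
	 *
	 *	page 1: LLC vma, left pinned
	 *	page 2: LLC vma, unpinned -- also covered by the target node
	 *
	 * Evicting the target with a matching colour should remove only the
	 * second vma; retrying with a different colour must fail, as it
	 * would require a guard page carved out of the pinned neighbour.
	 */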

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/* Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded\n");
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(ggtt);
	cleanup_objects(ggtt, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct i915_gem_ww_ctx ww;
	DRM_LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm, NULL);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

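	/*
	 * for_i915_gem_ww() is assumed to retry its body whenever the ww
	 * context hits -EDEADLK: back off (dropping the contended locks),
	 * re-run the eviction, and tear the context down once the body
	 * completes with any other result.
	 */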
	for_i915_gem_ww(&ww, err, false) {
		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_evict_vm(&ggtt->vm, &ww);
		mutex_unlock(&ggtt->vm.mutex);
	}

	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
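	/*
	 * i915_gem_gtt_insert() is assumed to take (vm, ww, node, size,
	 * alignment, colour, start, end, flags). I915_COLOR_UNEVICTABLE
	 * hides the node from the eviction scan, while PIN_NOEVICT forbids
	 * the insertion itself from evicting anything.
	 */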
	memset(&hole, 0, sizeof(hole));
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, ggtt->vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		mutex_unlock(&ggtt->vm.mutex);
		r = kcalloc(1, sizeof(*r), GFP_KERNEL);
		mutex_lock(&ggtt->vm.mutex);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, ggtt->vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, gt, id) {
		struct i915_sw_fence fence;
		struct i915_request *last = NULL;

		count = 0;
		onstack_fence_init(&fence);
		do {
			struct intel_context *ce;
			struct i915_request *rq;

			ce = intel_context_create(engine);
			if (IS_ERR(ce))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = intel_context_create_request(ce);
			igt_evict_ctl.fail_if_busy = false;
			intel_context_put(ce);

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (on %s): %d\n",
					       engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			if (last)
				i915_request_put(last);
			last = i915_request_get(rq);
			err = 0;
		} while (1);
		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);
		if (err)
			break;
		if (last) {
			if (i915_request_wait(last, 0, HZ) < 0) {
				err = -EIO;
				i915_request_put(last);
				pr_err("Failed waiting for last request (on %s)\n",
				       engine->name);
				break;
			}
			i915_request_put(last);
		}
		err = intel_gt_wait_for_idle(engine->gt, HZ * 3);
		if (err) {
			pr_err("Failed to idle GT (on %s)\n", engine->name);
			break;
		}
	}

	mutex_lock(&ggtt->vm.mutex);
out_locked:
	if (igt_flush_test(i915))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err;
}

int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, to_gt(i915));

	mock_destroy_device(i915);
	return err;
}

int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}

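/*
 * Usage note: assuming the standard selftest knobs are built in
 * (CONFIG_DRM_I915_SELFTEST), both suites are normally driven via module
 * parameters, e.g.
 *
 *	modprobe i915 mock_selftests=-1
 *	modprobe i915 live_selftests=-1
 *
 * where -1 requests every registered subtest.
 */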