i915_gem_execbuffer.c revision 290228
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_gem_execbuffer.c 290228 2015-10-31 15:09:31Z dumbbell $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/limits.h>
#include <sys/sf_buf.h>

struct change_domains {
	uint32_t invalidate_domains;
	uint32_t flush_domains;
	uint32_t flush_rings;
	uint32_t flips;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
				  struct intel_ring_buffer *ring,
				  struct change_domains *cd)
{
	uint32_t invalidate_domains = 0, flush_domains = 0;

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->base.pending_write_domain == 0)
		obj->base.pending_read_domains |= obj->base.read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->base.write_domain &&
	    (((obj->base.write_domain != obj->base.pending_read_domains ||
	       obj->ring != ring)) ||
	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
		flush_domains |= obj->base.write_domain;
		invalidate_domains |=
			obj->base.pending_read_domains & ~obj->base.write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	if (obj->base.pending_write_domain)
		cd->flips |= atomic_load_acq_int(&obj->pending_flip);

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
		obj->base.pending_write_domain = obj->base.write_domain;

	cd->invalidate_domains |= invalidate_domains;
	cd->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= intel_ring_flag(obj->ring);
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= intel_ring_flag(ring);
}

struct eb_objects {
	u_long hashmask;
	LIST_HEAD(, drm_i915_gem_object) *buckets;
};

static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;

	eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO);
	eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	int i;

	for (i = 0; i <= eb->hashmask; i++)
		LIST_INIT(&eb->buckets[i]);
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{

	LIST_INSERT_HEAD(&eb->buckets[obj->exec_handle & eb->hashmask],
	    obj, exec_node);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct drm_i915_gem_object *obj;

	LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{

	free(eb->buckets, DRM_I915_GEM);
	free(eb, DRM_I915_GEM);
}
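
/*
 * Usage sketch (informal, mirroring how i915_gem_do_execbuffer() below uses
 * this table): eb_objects is a small hash table mapping userspace buffer
 * handles to objects, so relocation processing can resolve target handles
 * without a linear scan of the object list.
 *
 *	struct eb_objects *eb = eb_create(args->buffer_count);
 *	obj->exec_handle = exec[i].handle;
 *	eb_add_object(eb, obj);				(for each object)
 *	obj = eb_get_object(eb, reloc->target_handle);	(during relocation)
 *	eb_reset(eb);			(slow path re-populates the buckets)
 *	eb_destroy(eb);
 */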

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}
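
/*
 * Informal rationale: objects that are already CPU-writable or are cacheable
 * get their relocations patched through a CPU (sf_buf) mapping in
 * i915_gem_execbuffer_relocate_entry() below; all other objects are patched
 * through a write-combining GTT mapping instead.
 */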

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

#if WATCH_RELOC
	DRM_INFO("%s: obj %p offset %08x target %d "
		 "read %08x write %08x gtt %08x "
		 "presumed %08x delta %08x\n",
		 __func__,
		 obj,
		 (int) reloc->offset,
		 (int) reloc->target_handle,
		 (int) reloc->read_domains,
		 (int) reloc->write_domain,
		 (int) target_offset,
		 (int) reloc->presumed_offset,
		 reloc->delta);
#endif

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_DEBUG("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_DEBUG("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & PAGE_MASK;
		char *vaddr;
		struct sf_buf *sf;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
		    SFB_NOWAIT);
		if (sf == NULL)
			return -ENOMEM;
		vaddr = (void *)sf_buf_kva(sf);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		sf_buf_free(sf);
	} else {
		uint32_t *reloc_entry;
		char *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform.  */
		reloc->offset += obj->gtt_offset;
		reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
		    ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
		reloc_entry = (uint32_t *)(reloc_page + (reloc->offset &
		    PAGE_MASK));
		*(volatile uint32_t *)reloc_entry = reloc->delta;
		pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
	}

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
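
/*
 * Worked example (informal; obj_mapping stands in for whichever mapping,
 * sf_buf or GTT, the function above selected): if a command word must point
 * at byte 0x100 of a target object (reloc->delta == 0x100) and the target is
 * bound at GTT offset 0x10000 (target_offset), the code stores 0x10100 at
 * reloc->offset within the batch, i.e.
 *
 *	*(uint32_t *)(obj_mapping + reloc->offset) = target_offset + delta;
 *
 * and records target_offset in reloc->presumed_offset so the write can be
 * skipped next time if the target has not moved.
 */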

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > DRM_ARRAY_SIZE(stack_reloc))
			count = DRM_ARRAY_SIZE(stack_reloc);
		remain -= count;

		ret = -copyin_nofault(user_relocs, r, count * sizeof(r[0]));
		if (ret != 0)
			return (ret);

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    copyout_nofault(&r->presumed_offset,
					    &user_relocs->presumed_offset,
					    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}
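
/*
 * Note (informal): user relocations are copied through a 512-byte on-stack
 * buffer, N_RELOC(512) entries at a time, using the _nofault copyin/copyout
 * variants because this runs on the fast path with pagefaults disabled; any
 * fault surfaces as -EFAULT and sends the caller to the slow path.
 */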

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret, pflags;

	/* Try to move as many of the relocation targets off the active list
	 * to avoid unnecessary fallbacks to the slow path, as we cannot wait
	 * for the retirement with pagefaults disabled.
	 */
	i915_gem_retire_requests(dev);

	ret = 0;
	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the device lock lest the user pass in relocations
	 * contained within an mmapped bo; in such a case the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the device lock again. Obviously this is bad.
	 */
	pflags = vm_fault_disable_pagefaults();
	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	vm_fault_enable_pagefaults(pflags);

	return ret;
}
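
/*
 * Note (informal): this is the lock-safe fast path. If any relocation
 * faults (returns -EFAULT), i915_gem_do_execbuffer() drops the device lock
 * and retries via i915_gem_execbuffer_relocate_slow(), which copies all
 * relocation entries with ordinary copyin() before re-taking the lock.
 */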

#define  __EXEC_OBJECT_HAS_FENCE (1<<31)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
	if (ret)
		return ret;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				goto err_unpin;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	entry->offset = obj->gtt_offset;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
	return ret;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	drm_i915_private_t *dev_priv;
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;
	int ret;

	dev_priv = ring->dev->dev_private;
	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = pin_and_fence_object(obj, ring);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = pin_and_fence_object(obj, ring);
			if (ret) {
				int ret_ignore;

				/* This can potentially raise a harmless
				 * -EINVAL if we failed to bind in the above
				 * call. It cannot raise -EINTR since we know
				 * that the bo is freshly bound and so will
				 * not need to be flushed or waited upon.
				 */
				ret_ignore = i915_gem_object_unbind(obj);
				(void)ret_ignore;
				if (obj->gtt_space != NULL)
					printf("%s: gtt_space\n", __func__);
				break;
			}
		}

		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry;

			if (!obj->gtt_space)
				continue;

			entry = obj->exec_entry;
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				i915_gem_object_unpin_fence(obj);
				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
			}

			i915_gem_object_unpin(obj);

			/* ... and ensure a ppgtt mapping exists if needed. */
			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
						       obj, obj->cache_level);

				obj->has_aliasing_ppgtt_mapping = 1;
			}
		}

		if (ret != -ENOSPC || retry > 1)
			return ret;

		/* First attempt, just clear anything that is purgeable.
		 * Second attempt, clear the entire GTT.
		 */
		ret = i915_gem_evict_everything(ring->dev, retry == 0);
		if (ret)
			return ret;

		retry++;
	} while (1);

err:
	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
		struct drm_i915_gem_exec_object2 *entry;

		if (!obj->gtt_space)
			continue;

		entry = obj->exec_entry;
		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
			i915_gem_object_unpin_fence(obj);
			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
		}

		i915_gem_object_unpin(obj);
	}

	return ret;
}
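
/*
 * Note (informal): on -ENOSPC the reservation loop retries at most twice;
 * the first retry evicts only purgeable objects
 * (i915_gem_evict_everything(dev, true) since retry == 0), the second evicts
 * the entire GTT, and a third failure propagates -ENOSPC to the caller.
 */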

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	DRM_UNLOCK(dev);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = malloc(count * sizeof(*reloc_offset), DRM_I915_GEM,
	    M_WAITOK | M_ZERO);
	reloc = malloc(total * sizeof(*reloc), DRM_I915_GEM, M_WAITOK | M_ZERO);

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry *user_relocs;

		user_relocs = (void *)(uintptr_t)exec[i].relocs_ptr;
		ret = -copyin(user_relocs, reloc + total,
		    exec[i].relocation_count * sizeof(*reloc));
		if (ret != 0) {
			DRM_LOCK(dev);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		DRM_LOCK(dev);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	free(reloc, DRM_I915_GEM);
	free(reloc_offset, DRM_I915_GEM);
	return ret;
}

static int
i915_gem_execbuffer_flush(struct drm_device *dev,
			  uint32_t invalidate_domains,
			  uint32_t flush_domains,
			  uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		for (i = 0; i < I915_NUM_RINGS; i++)
			if (flush_rings & (1 << i)) {
				ret = i915_gem_flush_ring(&dev_priv->rings[i],
				    invalidate_domains, flush_domains);
				if (ret)
					return ret;
			}
	}

	return 0;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct change_domains cd;
	int ret;

	memset(&cd, 0, sizeof(cd));
	list_for_each_entry(obj, objects, exec_list)
		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

	if (cd.invalidate_domains | cd.flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 cd.invalidate_domains,
			 cd.flush_domains);
#endif
		ret = i915_gem_execbuffer_flush(ring->dev,
						cd.invalidate_domains,
						cd.flush_domains,
						cd.flush_rings);
		if (ret)
			return ret;
	}

	if (cd.flips) {
		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
		if (ret)
			return ret;
	}

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;
	}

	return 0;
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
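
/*
 * Note (informal): ORing the two fields and masking with 0x7 is a compact
 * way of requiring that both batch_start_offset and batch_len be 8-byte
 * aligned; if either has any of its low three bits set, the check fails.
 */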

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
    vm_page_t ***map, int **maplen)
{
	vm_page_t *ma;
	int i, length, page_count;

	/* XXXKIB various limits checking is missing there */
	*map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
	*maplen = malloc(count * sizeof(*maplen), DRM_I915_GEM, M_WAITOK |
	    M_ZERO);
	for (i = 0; i < count; i++) {
		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (length == 0) {
			(*map)[i] = NULL;
			continue;
		}

		/*
		 * Since both start and end of the relocation region
		 * may be not aligned on the page boundary, be
		 * conservative and request a page slot for each
		 * partial page.  Thus +2.
		 */
		page_count = howmany(length, PAGE_SIZE) + 2;
		ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t),
		    DRM_I915_GEM, M_WAITOK | M_ZERO);
		(*maplen)[i] = vm_fault_quick_hold_pages(
		    &curproc->p_vmspace->vm_map, exec[i].relocs_ptr, length,
		    VM_PROT_READ | VM_PROT_WRITE, ma, page_count);
		if ((*maplen)[i] == -1) {
			free(ma, DRM_I915_GEM);
			(*map)[i] = NULL;
			return -EFAULT;
		}
	}

	return 0;
}
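
/*
 * Note (informal): holding the user relocation pages here, via
 * vm_fault_quick_hold_pages(), is what makes the copyin_nofault()/
 * copyout_nofault() calls on the fast path safe: the pages stay resident
 * while pagefaults are disabled, and are released with
 * vm_page_unhold_pages() in i915_gem_do_execbuffer().
 */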

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring,
				   u32 seqno)
{
	struct drm_i915_gem_object *obj;
	uint32_t old_read, old_write;

	list_for_each_entry(obj, objects, exec_list) {
		old_read = obj->base.read_domains;
		old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring, seqno);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->pending_gpu_write = true;
			list_move_tail(&obj->gpu_write_list,
				       &ring->gpu_write_list);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_busy(ring->dev, obj);
		}
		CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x",
		    obj, old_read, old_write);
	}

	intel_mark_busy(ring->dev, NULL);
}

int i915_gem_sync_exec_requests;

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	u32 invalidate;

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires.
	 *
	 * The sampler always gets flushed on i965 (sigh).
	 */
	invalidate = I915_GEM_DOMAIN_COMMAND;
	if (INTEL_INFO(dev)->gen >= 4)
		invalidate |= I915_GEM_DOMAIN_SAMPLER;
	if (ring->flush(ring, invalidate, 0)) {
		i915_gem_next_request_seqno(ring);
		return;
	}

	/* Add a breadcrumb for the completion of the batch buffer */
	request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
	if (request == NULL || i915_add_request(ring, file, request)) {
		i915_gem_next_request_seqno(ring);
		free(request, DRM_I915_GEM);
	} else if (i915_gem_sync_exec_requests) {
		i915_wait_request(ring, request->seqno);
		i915_gem_retire_requests(dev);
	}
}

static void
i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj,
    uint32_t batch_start_offset, uint32_t batch_len)
{
	char *mkva;
	uint64_t po_r, po_w;
	uint32_t cmd;

	po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
	    batch_start_offset + batch_len;
	if (batch_len > 0)
		po_r -= 4;
	mkva = pmap_mapdev_attr(trunc_page(po_r), 2 * PAGE_SIZE,
	    PAT_WRITE_COMBINING);
	po_r &= PAGE_MASK;
	cmd = *(uint32_t *)(mkva + po_r);

	if (cmd != MI_BATCH_BUFFER_END) {
		/*
		 * batch_len != 0 due to the check at the start of
		 * i915_gem_do_execbuffer
		 */
		if (batch_obj->base.size > batch_start_offset + batch_len) {
			po_w = po_r + 4;
			/* DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END !\n"); */
		} else {
			po_w = po_r;
			DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo cmd !\n");
		}
		*(uint32_t *)(mkva + po_w) = MI_BATCH_BUFFER_END;
	}

	pmap_unmapdev((vm_offset_t)mkva, 2 * PAGE_SIZE);
}
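
/*
 * Note (informal): this workaround is gated by the
 * i915_fix_mi_batchbuffer_end tunable below (0 by default). When enabled,
 * it patches a missing MI_BATCH_BUFFER_END terminator into the batch,
 * writing just past the batch when the object has room and overwriting the
 * final command word otherwise.
 */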

int i915_fix_mi_batchbuffer_end = 0;

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->rings[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
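
/*
 * Note (informal): each loop iteration above emits a three-dword
 * MI_LOAD_REGISTER_IMM write that zeroes one of the four gen7 streamout
 * write-offset registers (GEN7_SO_WRITE_OFFSET(0..3)), hence the
 * intel_ring_begin(ring, 4 * 3) reservation.
 */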

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 seqno;
	u32 mask;
	int ret, mode, i;
	vm_page_t **relocs_ma;
	int *relocs_len;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_DEBUG("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	if (args->batch_len == 0)
		return (0);

	ret = validate_exec_list(exec, args->buffer_count,
	    &relocs_ma, &relocs_len);
	if (ret)
		goto pre_mutex_err;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->rings[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->rings[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			ret = -EPERM;
			goto pre_mutex_err;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->rings[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			ret = -EPERM;
			goto pre_mutex_err;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		ret = -EINVAL;
		goto pre_mutex_err;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		ret = -EINVAL;
		goto pre_mutex_err;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->rings[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				ret = -EINVAL;
				goto pre_mutex_err;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				ret = -EINVAL;
				goto pre_mutex_err;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		ret = -EINVAL;
		goto pre_mutex_err;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		ret = -EINVAL;
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->rings[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			ret = -EINVAL;
			goto pre_mutex_err;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			ret = -EINVAL;
			goto pre_mutex_err;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			ret = -EINVAL;
			goto pre_mutex_err;
		}
		cliprects = malloc(args->num_cliprects * sizeof(*cliprects),
		    DRM_I915_GEM, M_WAITOK | M_ZERO);
		ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
		    sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0)
			goto pre_mutex_err;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		DRM_UNLOCK(dev);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		DRM_UNLOCK(dev);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			DRM_LOCK_ASSERT(dev);
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	seqno = i915_gem_next_request_seqno(ring);
	for (i = 0; i < I915_NUM_RINGS - 1; i++) {
		if (seqno < ring->sync_seqno[i]) {
			/* The GPU can not handle its semaphore value wrapping,
			 * so every billion or so execbuffers, we need to stall
			 * the GPU in order to reset the counters.
			 */
			ret = i915_gpu_idle(dev);
			if (ret)
				goto err;
			i915_gem_retire_requests(dev);

			KASSERT(ring->sync_seqno[i] == 0, ("Non-zero sync_seqno"));
		}
	}

	if (ring == &dev_priv->rings[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;

	if (i915_fix_mi_batchbuffer_end) {
		i915_gem_fix_mi_batchbuffer_end(batch_obj,
		    args->batch_start_offset, args->batch_len);
	}

	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len);
		if (ret)
			goto err;
	}

	CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
	    exec_start, exec_len);

	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	DRM_UNLOCK(dev);

pre_mutex_err:
	for (i = 0; i < args->buffer_count; i++) {
		if (relocs_ma[i] != NULL) {
			vm_page_unhold_pages(relocs_ma[i], relocs_len[i]);
			free(relocs_ma[i], DRM_I915_GEM);
		}
	}
	free(relocs_len, DRM_I915_GEM);
	free(relocs_ma, DRM_I915_GEM);
	free(cliprects, DRM_I915_GEM);
	return ret;
}
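
/*
 * Overview (informal): the submission pipeline implemented above is, in
 * order: validate and wire the user relocation lists, pick a ring, look up
 * and deduplicate the buffer objects, reserve GTT space
 * (i915_gem_execbuffer_reserve), apply relocations (fast path, falling back
 * to the slow path on -EFAULT), flush/invalidate domains
 * (i915_gem_execbuffer_move_to_gpu), switch hardware context, dispatch the
 * batch, and finally mark the objects active and queue a request.
 */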

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	DRM_DEBUG("buffers_ptr %d buffer_count %d len %08x\n",
	    (int) args->buffers_ptr, args->buffer_count, args->batch_len);

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	/* XXXKIB user-controlled malloc size */
	exec_list = malloc(sizeof(*exec_list) * args->buffer_count,
	    DRM_I915_GEM, M_WAITOK);
	exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
	    DRM_I915_GEM, M_WAITOK);
	ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec_list,
	    sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		free(exec_list, DRM_I915_GEM);
		free(exec2_list, DRM_I915_GEM);
		return ret;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = -copyout(exec_list, (void *)(uintptr_t)args->buffers_ptr,
		    sizeof(*exec_list) * args->buffer_count);
		if (ret != 0) {
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	free(exec_list, DRM_I915_GEM);
	free(exec2_list, DRM_I915_GEM);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	DRM_DEBUG("buffers_ptr %jx buffer_count %d len %08x\n",
	    (uintmax_t)args->buffers_ptr, args->buffer_count, args->batch_len);

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* XXXKIB user-controllable malloc size */
	exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
	    DRM_I915_GEM, M_WAITOK);
	ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
	    sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		free(exec2_list, DRM_I915_GEM);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = -copyout(exec2_list, (void *)(uintptr_t)args->buffers_ptr,
		    sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	free(exec2_list, DRM_I915_GEM);
	return ret;
}