// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Etnaviv Project
 */

#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"

/*
 * Cmdstream submission:
 */

#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNA_SUBMIT_BO_x */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

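/*
 * Allocate a submit object with trailing storage for nr_bos BO slots and a
 * separate array for nr_pmrs perfmon requests, and take the initial reference.
 */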
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
			       GFP_KERNEL);
	if (!submit->pmrs) {
		kfree(submit);
		return NULL;
	}
	submit->nr_pmrs = nr_pmrs;

	submit->gpu = gpu;
	kref_init(&submit->refcount);

	return submit;
}

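/*
 * Resolve the userspace BO handles to GEM objects under the file's
 * table_lock, validating the per-BO flags (and softpin addresses) and
 * taking a reference on each object along the way.
 */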
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}

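/* Drop the reservation lock of a single BO, if this submit still holds it. */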
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		dma_resv_unlock(obj->resv);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}

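/*
 * Lock the reservation objects of all BOs in the submit using the ww_mutex
 * acquire context. On -EDEADLK, unlock everything, take the contended lock
 * via the slowpath and retry the whole sequence.
 */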
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

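/*
 * Reserve a fence slot on each BO and, unless the submit opted out with
 * ETNA_SUBMIT_NO_IMPLICIT, add the implicit (reservation object) fences as
 * dependencies of the scheduler job.
 */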
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct dma_resv *robj = bo->obj->base.resv;

		ret = dma_resv_reserve_fences(robj, 1);
		if (ret)
			return ret;

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
							      &bo->obj->base,
							      bo->flags & ETNA_SUBMIT_BO_WRITE);
		if (ret)
			return ret;
	}

	return ret;
}

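/*
 * Attach the submit's out-fence to every BO's reservation object, with write
 * or read usage depending on the BO flags, then drop the reservation locks.
 */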
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;

		dma_resv_add_fence(obj->resv, submit->out_fence, write ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
		submit_unlock_object(submit, i);
	}
}

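/*
 * Acquire a GPU VA mapping for each BO in the submit's MMU context. For
 * softpin submits the mapping must match the address requested by userspace.
 */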
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		     submit->bos[i].va != mapping->iova) {
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}

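/* Bounds-checked lookup of a BO slot in the submit by index. */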
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	*bo = &submit->bos[idx];

	return 0;
}

/* process the relocs and patch up the cmdstream as needed: */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		if ((off >= size) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}

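/*
 * Validate the perfmon requests (read BO, offset, flags, domain/signal) and
 * copy them into the submit's pmr array.
 */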
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
	u32 i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
		struct etnaviv_gem_submit_bo *bo;
		int ret;

		ret = submit_bo(submit, r->read_idx, &bo);
		if (ret)
			return ret;

		/* offset 0 holds a sequence number used for userspace sync */
		if (r->read_offset == 0) {
			DRM_ERROR("perfmon request: offset is 0\n");
			return -EINVAL;
		}

		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
			DRM_ERROR("perfmon request %u: offset outside object\n", i);
			return -EINVAL;
		}

		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
			DRM_ERROR("perfmon request: flags are not valid\n");
			return -EINVAL;
		}

		if (etnaviv_pm_req_validate(r, exec_state)) {
			DRM_ERROR("perfmon request: domain or signal not valid\n");
			return -EINVAL;
		}

		submit->pmrs[i].flags = r->flags;
		submit->pmrs[i].domain = r->domain;
		submit->pmrs[i].signal = r->signal;
		submit->pmrs[i].sequence = r->sequence;
		submit->pmrs[i].offset = r->read_offset;
		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
	}

	return 0;
}

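/*
 * kref release function: free the cmdbuf suballocation, drop the MMU context
 * references, unpin/unlock and release all BOs, remove the out-fence from the
 * user fence lookup and finally free the submit itself.
 */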
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->out_fence) {
		/*
		 * Remove from the user fence array before dropping the
		 * reference, so the fence can no longer be found by a lookup.
		 */
		xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
		dma_fence_put(submit->out_fence);
	}

	put_pid(submit->pid);

	kfree(submit->pmrs);
	kfree(submit);
}

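/* Drop a submit reference; the last put releases it via submit_cleanup(). */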
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}

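/*
 * The GEM submit ioctl: validate the arguments, copy the BO, reloc and
 * perfmon arrays plus the command stream from userspace, build the submit
 * object and hand it to the scheduler.
 */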
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
	    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
		DRM_ERROR("submit arguments out of size limits\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	submit->pid = pid;

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_put;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 1, submit->ctx);
	if (ret)
		goto err_submit_put;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_job;

	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_job;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_job;
		}

		ret = drm_sched_job_add_dependency(&submit->sched_job,
						   in_fence);
		if (ret)
			goto err_submit_job;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_job;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_job;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_job;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_job;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_job;

	ret = etnaviv_sched_push_job(submit);
	if (ret)
		goto err_submit_job;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			/*
			 * When this late error is hit, the submit has already
			 * been handed over to the scheduler. At this point
			 * the sched_job must not be cleaned up.
			 */
			goto err_submit_put;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_job:
	if (ret)
		drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}