// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/hardirq.h>
#include <linux/iosys-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
#include "lima_trace.h"

struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;
};

static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;

int lima_sched_slab_init(void)
{
	if (!lima_fence_slab) {
		lima_fence_slab = kmem_cache_create(
			"lima_fence", sizeof(struct lima_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!lima_fence_slab)
			return -ENOMEM;
	}

	lima_fence_slab_refcnt++;
	return 0;
}

void lima_sched_slab_fini(void)
{
	if (!--lima_fence_slab_refcnt) {
		kmem_cache_destroy(lima_fence_slab);
		lima_fence_slab = NULL;
	}
}
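
/*
 * Usage sketch (illustrative, not part of this file): the two slab
 * helpers above are refcounted so that multiple lima devices can share
 * a single fence cache. A device init/fini path would pair them like
 * this (the example_* names are hypothetical):
 *
 *	static int example_device_init(struct lima_device *ldev)
 *	{
 *		int err = lima_sched_slab_init();
 *
 *		if (err)
 *			return err;
 *		// ... rest of device bring-up ...
 *		return 0;
 *	}
 *
 *	static void example_device_fini(struct lima_device *ldev)
 *	{
 *		// ... rest of device teardown ...
 *		lima_sched_slab_fini();
 *	}
 *
 * The kmem_cache is only destroyed once the last user has called
 * lima_sched_slab_fini().
 */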

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
	return "lima";
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	return f->pipe->base.name;
}

static void lima_fence_release_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct lima_fence *fence = to_lima_fence(f);

	kmem_cache_free(lima_fence_slab, fence);
}

static void lima_fence_release(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	call_rcu(&f->base.rcu, lima_fence_release_rcu);
}

static const struct dma_fence_ops lima_fence_ops = {
	.get_driver_name = lima_fence_get_driver_name,
	.get_timeline_name = lima_fence_get_timeline_name,
	.release = lima_fence_release,
};

static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}
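
/*
 * Note on fence lifetime: each pipe allocates its own dma_fence context
 * in lima_sched_pipe_init(), so the seqno passed to dma_fence_init()
 * above increases monotonically per pipe. A minimal sketch of a waiter
 * holding its own reference (illustrative only):
 *
 *	struct lima_fence *f = lima_fence_create(pipe);
 *
 *	if (f) {
 *		// ... kick off the hardware task ...
 *		dma_fence_wait(&f->base, true);	// interruptible wait
 *		dma_fence_put(&f->base);
 *	}
 *
 * The fence is signaled from lima_sched_pipe_task_done() and, once the
 * last reference is dropped, freed via call_rcu() in
 * lima_fence_release() so lockless RCU readers of the fence stay safe.
 */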

static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
	return container_of(job, struct lima_sched_task, base);
}

static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct lima_sched_pipe, base);
}

int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm)
{
	int err, i;

	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
	if (!task->bos)
		return -ENOMEM;

	for (i = 0; i < num_bos; i++)
		drm_gem_object_get(&bos[i]->base.base);

	err = drm_sched_job_init(&task->base, &context->base, 1, vm);
	if (err) {
		/* balance the references taken above */
		for (i = 0; i < num_bos; i++)
			drm_gem_object_put(&bos[i]->base.base);
		kfree(task->bos);
		return err;
	}

	drm_sched_job_arm(&task->base);

	task->num_bos = num_bos;
	task->vm = lima_vm_get(vm);

	return 0;
}

void lima_sched_task_fini(struct lima_sched_task *task)
{
	int i;

	drm_sched_job_cleanup(&task->base);

	if (task->bos) {
		for (i = 0; i < task->num_bos; i++)
			drm_gem_object_put(&task->bos[i]->base.base);
		kfree(task->bos);
	}

	lima_vm_put(task->vm);
}

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context)
{
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_destroy(&context->base);
}

struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

	trace_lima_task_submit(task);
	drm_sched_entity_push_job(&task->base);
	return fence;
}
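
/*
 * Putting the pieces together, the submit path (lima_gem_submit() in
 * lima_gem.c; sketched here with locking and error handling elided)
 * uses the helpers above roughly as follows:
 *
 *	err = lima_sched_task_init(task, context, bos, num_bos, vm);
 *	if (err)
 *		return err;
 *
 *	fence = lima_sched_context_queue_task(task);
 *	// ... attach the fence to the BOs' reservation objects,
 *	//     hand it back to userspace ...
 *	dma_fence_put(fence);
 *
 * Taking a reference on the scheduler's "finished" fence before pushing
 * the job keeps the fence valid for the caller even if the job runs and
 * completes immediately.
 */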

static int lima_pm_busy(struct lima_device *ldev)
{
	int ret;

	/* resume GPU if it has been suspended by runtime PM */
	ret = pm_runtime_resume_and_get(ldev->dev);
	if (ret < 0)
		return ret;

	lima_devfreq_record_busy(&ldev->devfreq);
	return 0;
}

static void lima_pm_idle(struct lima_device *ldev)
{
	lima_devfreq_record_idle(&ldev->devfreq);

	/* GPU can do auto runtime suspend */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}
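
/*
 * lima_pm_busy()/lima_pm_idle() bracket every job: the busy side runs
 * in lima_sched_run_job() before the task is started, and the idle side
 * runs either from lima_sched_pipe_task_done() on completion or from
 * lima_sched_timedout_job() after a hang. For the autosuspend calls
 * above to have any effect, the device init code is expected to have
 * enabled runtime PM along these lines (a sketch; the actual setup
 * lives in the device init code, and the 200 ms delay is an
 * assumption):
 *
 *	pm_runtime_set_autosuspend_delay(ldev->dev, 200);
 *	pm_runtime_use_autosuspend(ldev->dev);
 *	pm_runtime_enable(ldev->dev);
 */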

static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	struct lima_fence *fence;
	int i, err;

	/* after a GPU reset, skip the job if its fence is already errored out */
	if (job->s_fence->finished.error < 0)
		return NULL;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;

	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;

	/* Take an extra reference for the caller; otherwise the IRQ
	 * handler may drop the fence before the caller gets to use it.
	 */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* This flush is needed for the MMU to work correctly; otherwise
	 * GP/PP will hang or page fault for unknown reasons after
	 * running for a while.
	 *
	 * Still to investigate:
	 * 1. is it related to the TLB?
	 * 2. how much does the L2 cache flush cost in performance?
	 * 3. can we flush less often, given that all GP/PP cores share
	 *    the same L2 cache on the Mali-400?
	 *
	 * TODO:
	 * 1. move this to task fini to save some wait time?
	 * 2. when GP/PP use different L2 caches, does PP need to wait
	 *    for the GP L2 cache flush?
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu) {
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	trace_lima_task_run(task);

	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}
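
/*
 * drm_sched contract recap: run_job returns the hardware fence that
 * will be signaled when the task finishes (or NULL on failure, or for
 * jobs already errored out by a reset), and the scheduler signals the
 * job's "finished" fence from it. The extra reference taken on
 * task->fence above is dropped in lima_sched_free_job().
 */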

static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
	struct lima_sched_error_task *et;
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	struct lima_ctx *ctx =
		container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
	struct lima_dump_task *dt;
	struct lima_dump_chunk *chunk;
	struct lima_dump_chunk_pid *pid_chunk;
	struct lima_dump_chunk_buffer *buffer_chunk;
	u32 size, task_size, mem_size;
	int i;
	struct iosys_map map;
	int ret;

	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "fail to save task state from %s pid %d: "
			 "error task list is full\n", ctx->pname, ctx->pid);
		goto out;
	}

	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	/* pid chunk */
	size += sizeof(struct lima_dump_chunk);
	/* buffer chunks */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}

	task_size = size + sizeof(struct lima_dump_task);
	mem_size = task_size + sizeof(*et);
	et = kvmalloc(mem_size, GFP_KERNEL);
	if (!et) {
		dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	}

	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	memset(dt, 0, sizeof(*dt));
	dt->id = pipe_id;
	dt->size = size;

	chunk = (struct lima_dump_chunk *)(dt + 1);
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	memset(pid_chunk, 0, sizeof(*pid_chunk));
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		void *data;

		memset(buffer_chunk, 0, sizeof(*buffer_chunk));
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			if (!data) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			vunmap(data);
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
			if (ret) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);

			drm_gem_vunmap_unlocked(&bo->base.base, &map);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "save error task state success\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
}
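
/*
 * Resulting layout of the dump buffer built above: one variable-size
 * chunk after another, each prefixed by a struct lima_dump_chunk header
 * (sizes match the accounting loop at the top of the function):
 *
 *	+-------------------------+
 *	| struct lima_dump_task   |
 *	+-------------------------+
 *	| chunk: FRAME            | pipe->frame_size payload bytes
 *	+-------------------------+
 *	| chunk: PROCESS_NAME     | sizeof(ctx->pname) payload bytes
 *	+-------------------------+
 *	| chunk: PROCESS_ID       | pid carried in the header, no payload
 *	+-------------------------+
 *	| chunk: BUFFER (per BO)  | heap_size or lima_bo_size(bo) bytes
 *	| ...                     |
 *	+-------------------------+
 *
 * Userspace retrieves the accumulated dumps through the driver's error
 * dump interface.
 */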

static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;
	struct lima_ip *ip = pipe->processor[0];
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	/*
	 * The Lima IRQ handler may take a long time to run if another
	 * IRQ handler is hogging the CPU. To avoid reporting spurious
	 * Lima job timeouts in such cases, synchronize with the IRQ
	 * handler and re-check the fence status.
	 */
	for (i = 0; i < pipe->num_processor; i++)
		synchronize_irq(pipe->processor[i]->irq);
	if (pipe->bcast_processor)
		synchronize_irq(pipe->bcast_processor->irq);

	if (dma_fence_is_signaled(task->fence)) {
		DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	/*
	 * The task might still finish while this timeout handler runs.
	 * To prevent a race condition on its completion, mask all irqs
	 * on the running core until the next hard reset completes.
	 */
	pipe->task_mask_irq(pipe);

	if (!pipe->error)
		DRM_ERROR("%s job timeout\n", lima_ip_name(ip));

	drm_sched_stop(&pipe->base, &task->base);

	drm_sched_increase_karma(&task->base);

	if (lima_max_error_tasks)
		lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu) {
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
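
/*
 * Recovery sequence in lima_sched_timedout_job(), in short: confirm the
 * hang is real (the two fence checks), mask IRQs on the stuck core,
 * stop the scheduler, optionally record the task for the error dump,
 * hard reset the processor via task_error(), resume MMU page fault
 * handling, drop the VM/task state and the runtime PM reference, then
 * resubmit the surviving jobs and restart the scheduler.
 */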

static void lima_sched_free_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_vm *vm = task->vm;
	struct lima_bo **bos = task->bos;
	int i;

	dma_fence_put(task->fence);

	for (i = 0; i < task->num_bos; i++)
		lima_vm_bo_del(vm, bos[i]);

	lima_sched_task_fini(task);
	kmem_cache_free(pipe->task_slab, task);
}

static const struct drm_sched_backend_ops lima_sched_ops = {
	.run_job = lima_sched_run_job,
	.timedout_job = lima_sched_timedout_job,
	.free_job = lima_sched_free_job,
};

static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 10000;

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
			      DRM_SCHED_PRIORITY_COUNT,
			      1,
			      lima_job_hang_limit,
			      msecs_to_jiffies(timeout), NULL,
			      NULL, name, pipe->ldev->dev);
}
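
/*
 * lima_sched_timeout_ms and lima_job_hang_limit are module parameters
 * (declared in lima_drv.h, defined in lima_drv.c); a timeout of 0 falls
 * back to the 10 second default above. For example, to shorten the hang
 * detection window while debugging (the parameter spelling here is an
 * assumption, check `modinfo lima` for the authoritative names):
 *
 *	modprobe lima lima_sched_timeout_ms=500
 */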

void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
	drm_sched_fini(&pipe->base);
}

void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}
}
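
/*
 * lima_sched_pipe_task_done() is called from the GP/PP interrupt
 * handlers once the hardware finishes, or faults on, the current task.
 * On success it signals the hardware fence returned by run_job, which
 * lets the scheduler signal the job's "finished" fence and eventually
 * free the job via lima_sched_free_job(); on error it either schedules
 * the recover work for recoverable tasks (growable heap buffers) or
 * raises a scheduler fault to trigger the timeout/reset path.
 */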