/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "runq.h"

#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/mc.h>
#include <subdev/mmu.h>
#include <engine/sw.h>

#include <nvif/class.h>

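/* Writing a channel ID to 0x002634 appears to request preemption of that
 * channel; gf100_runl_preempt_pending() below polls bit 20 of the same
 * register, presumably to detect completion.
 */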
void
gf100_chan_preempt(struct nvkm_chan *chan)
{
	nvkm_wr32(chan->cgrp->runl->fifo->engine.subdev.device, 0x002634, chan->id);
}

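/* 0x003004 + (chid * 8) looks to be a per-channel control word: bit 0 seems
 * to enable/disable the channel, and gf100_chan_start() also sets bits
 * 0x001f0000, whose exact meaning is not documented here.
 */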
static void
gf100_chan_stop(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_mask(device, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
}

static void
gf100_chan_start(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x003004 + (chan->id * 8), 0x001f0001);
}

static void gf100_fifo_intr_engine(struct nvkm_fifo *);

static void
gf100_chan_unbind(struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;

	/*TODO: Is this cargo-culted, or necessary? RM does *something* here... Why? */
	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + (chan->id * 8), 0x00000000);
}

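/* 0x003000 + (chid * 8) holds the channel's instance block address in 4KiB
 * units; the 0xc0000000 bits presumably mark the entry valid, and unbinding
 * simply clears the whole word.
 */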
static void
gf100_chan_bind(struct nvkm_chan *chan)
{
	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;

	nvkm_wr32(device, 0x003000 + (chan->id * 8), 0xc0000000 | chan->inst->addr >> 12);
}

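/* Fill in the RAMFC portion of the channel's instance block.  Offsets
 * 0x08/0x0c take the USERD address and 0x48/0x4c the GPFIFO base with its
 * log2 entry count in the upper halfword; the remaining values are magic
 * numbers carried over as-is, and their meaning is not documented here.
 */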
static int
gf100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
	const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
	const u32 limit2 = ilog2(length / 8);

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
	nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
	nvkm_wo32(chan->inst, 0x10, 0x0000face);
	nvkm_wo32(chan->inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
	nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
	nvkm_wo32(chan->inst, 0x54, 0x00000002);
	nvkm_wo32(chan->inst, 0x84, 0x20400000);
	nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
	nvkm_wo32(chan->inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->inst, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(chan->inst, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(chan->inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->inst);
	return 0;
}

static const struct nvkm_chan_func_ramfc
gf100_chan_ramfc = {
	.write = gf100_chan_ramfc_write,
	.devm = 0xfff,
};

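/* Zero the channel's USERD words before first use; these presumably hold
 * GET/PUT and related pointers that must not contain stale values.
 */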
void
gf100_chan_userd_clear(struct nvkm_chan *chan)
{
	nvkm_kmap(chan->userd.mem);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x040, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x044, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x048, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x04c, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x050, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x058, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x05c, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x060, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x088, 0x00000000);
	nvkm_wo32(chan->userd.mem, chan->userd.base + 0x08c, 0x00000000);
	nvkm_done(chan->userd.mem);
}

static const struct nvkm_chan_func_userd
gf100_chan_userd = {
	.bar = 1,
	.size = 0x1000,
	.clear = gf100_chan_userd_clear,
};

const struct nvkm_chan_func_inst
gf100_chan_inst = {
	.size = 0x1000,
	.zero = true,
	.vmm = true,
};

static const struct nvkm_chan_func
gf100_chan = {
	.inst = &gf100_chan_inst,
	.userd = &gf100_chan_userd,
	.ramfc = &gf100_chan_ramfc,
	.bind = gf100_chan_bind,
	.unbind = gf100_chan_unbind,
	.start = gf100_chan_start,
	.stop = gf100_chan_stop,
	.preempt = gf100_chan_preempt,
};

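/* Write the engine context address into the per-engine pointer slot of the
 * channel's instance block; ORing in 4 presumably flags the pointer as
 * valid, and a NULL cctx unbinds the context again.
 */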
static void
gf100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
{
	u64 addr = 0ULL;
	u32 ptr0;

	switch (engn->engine->subdev.type) {
	case NVKM_ENGINE_SW    : return;
	case NVKM_ENGINE_GR    : ptr0 = 0x0210; break;
	case NVKM_ENGINE_CE    : ptr0 = 0x0230 + (engn->engine->subdev.inst * 0x10); break;
	case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
	case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
	case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
	default:
		WARN_ON(1);
		return;
	}

	if (cctx) {
		addr  = cctx->vctx->vma->addr;
		addr |= 4ULL;
	}

	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
	nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
	nvkm_done(chan->inst);
}

static int
gf100_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
{
	int ret;

	ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
	if (ret)
		return ret;

	return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
}

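/* CTXSW_TIMEOUT recovery handshake: gf100_engn_mmu_fault_trigger() masks off
 * the MMU fault interrupt and writes the engine's fault ID (with bit 8 set)
 * into 0x002a30 + (engn->id * 4); gf100_engn_mmu_fault_triggered() then
 * checks and acknowledges that bit from the fault handler, re-enabling the
 * interrupt once every triggered fault has been seen.
 */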
bool
gf100_engn_mmu_fault_triggered(struct nvkm_engn *engn)
{
	struct nvkm_runl *runl = engn->runl;
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	u32 data = nvkm_rd32(device, 0x002a30 + (engn->id * 4));

	ENGN_DEBUG(engn, "%08x: mmu fault triggered", data);
	if (!(data & 0x00000100))
		return false;

	spin_lock(&fifo->lock);
	nvkm_mask(device, 0x002a30 + (engn->id * 4), 0x00000100, 0x00000000);
	if (atomic_dec_and_test(&runl->rc_triggered))
		nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock(&fifo->lock);
	return true;
}

void
gf100_engn_mmu_fault_trigger(struct nvkm_engn *engn)
{
	struct nvkm_runl *runl = engn->runl;
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;

	ENGN_DEBUG(engn, "triggering mmu fault on 0x%02x", engn->fault);
	spin_lock(&fifo->lock);
	if (atomic_inc_return(&runl->rc_triggered) == 1)
		nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);
	nvkm_wr32(device, 0x002a30 + (engn->id * 4), 0x00000100 | engn->fault);
	spin_unlock(&fifo->lock);
}

/*TODO: clean all this up. */
struct gf100_engn_status {
	bool busy;
	bool save;
	bool unk0;
	bool unk1;
	u8   chid;
};

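/* Decode the engine status word at 0x002640 + (engn->id * 4): bit 28 is
 * treated as "busy", bit 20 as "save", and the low 7 bits as the channel ID
 * the engine is currently on; bits 14 and 12 are not understood, but appear
 * to indicate a pending channel switch (see gf100_engn_chsw()).
 */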
static void
gf100_engn_status(struct nvkm_engn *engn, struct gf100_engn_status *status)
{
	u32 stat = nvkm_rd32(engn->engine->subdev.device, 0x002640 + (engn->id * 4));

	status->busy = (stat & 0x10000000);
	status->save = (stat & 0x00100000);
	status->unk0 = (stat & 0x00004000);
	status->unk1 = (stat & 0x00001000);
	status->chid = (stat & 0x0000007f);

	ENGN_DEBUG(engn, "%08x: busy %d save %d unk0 %d unk1 %d chid %d",
		   stat, status->busy, status->save, status->unk0, status->unk1, status->chid);
}

static int
gf100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
{
	struct gf100_engn_status status;

	gf100_engn_status(engn, &status);
	if (status.busy) {
		*cgid = false;
		return status.chid;
	}

	return -ENODEV;
}

static bool
gf100_engn_chsw(struct nvkm_engn *engn)
{
	struct gf100_engn_status status;

	gf100_engn_status(engn, &status);
	if (status.busy && (status.unk0 || status.unk1))
		return true;

	return false;
}

static const struct nvkm_engn_func
gf100_engn = {
	.chsw = gf100_engn_chsw,
	.cxid = gf100_engn_cxid,
	.mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
	.mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
	.ctor = gf100_ectx_ctor,
	.bind = gf100_ectx_bind,
};

const struct nvkm_engn_func
gf100_engn_sw = {
};

static const struct nvkm_bitfield
gf100_runq_intr_0_names[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

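/* PBDMA interrupt handler.  EMPTY_SUBC interrupts are first offered to the
 * SW engine as software methods; whatever remains is logged, and the error
 * bits in 0xc67fe000 additionally mark the offending channel as errored.
 */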
bool
gf100_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
{
	struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (runq->id * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (runq->id * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (runq->id * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (runq->id * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & runq->fifo->chid->mask;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), runq->func->intr_0_names, show);
		chan = nvkm_chan_get_chid(&runq->fifo->engine, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   runq->id, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->name : "unknown", subc, mthd, data);

		/*TODO: use proper procedure for clearing each exception / debug output */
		if ((stat & 0xc67fe000) && chan)
			nvkm_chan_error(chan, true);
		nvkm_chan_put(&chan, flags);
	}

	nvkm_wr32(device, 0x0400c0 + (runq->id * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), stat);
	return true;
}

void
gf100_runq_init(struct nvkm_runq *runq)
{
	struct nvkm_device *device = runq->fifo->engine.subdev.device;

	nvkm_mask(device, 0x04013c + (runq->id * 0x2000), 0x10000100, 0x00000000);
	nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), 0xffffffff); /* INTR */
	nvkm_wr32(device, 0x04010c + (runq->id * 0x2000), 0xfffffeff); /* INTREN */
}

static const struct nvkm_runq_func
gf100_runq = {
	.init = gf100_runq_init,
	.intr = gf100_runq_intr,
	.intr_0_names = gf100_runq_intr_0_names,
};

bool
gf100_runl_preempt_pending(struct nvkm_runl *runl)
{
	return nvkm_rd32(runl->fifo->engine.subdev.device, 0x002634) & 0x00100000;
}

static void
gf100_runl_fault_clear(struct nvkm_runl *runl)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x00262c, 0x00000000, 0x00000000);
}

static void
gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000);
}

static void
gf100_runl_block(struct nvkm_runl *runl, u32 engm)
{
	nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm);
}

static bool
gf100_runl_pending(struct nvkm_runl *runl)
{
	return nvkm_rd32(runl->fifo->engine.subdev.device, 0x00227c) & 0x00100000;
}

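/* Submit a new runlist: 0x002270 takes the runlist's target aperture and
 * address in 4KiB units, and writing the entry count to 0x002274 appears to
 * kick off processing (gf100_runl_pending() polls bit 20 of 0x00227c until
 * the update completes).
 */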
static void
gf100_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
{
	struct nvkm_device *device = runl->fifo->engine.subdev.device;
	u64 addr = nvkm_memory_addr(memory) + start;
	int target;

	switch (nvkm_memory_target(memory)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
	nvkm_wr32(device, 0x002274, 0x01f00000 | count);
}

static void
gf100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
{
	nvkm_wo32(memory, offset + 0, chan->id);
	nvkm_wo32(memory, offset + 4, 0x00000004);
}

static const struct nvkm_runl_func
gf100_runl = {
	.size = 8,
	.update = nv50_runl_update,
	.insert_chan = gf100_runl_insert_chan,
	.commit = gf100_runl_commit,
	.wait = nv50_runl_wait,
	.pending = gf100_runl_pending,
	.block = gf100_runl_block,
	.allow = gf100_runl_allow,
	.fault_clear = gf100_runl_fault_clear,
	.preempt_pending = gf100_runl_preempt_pending,
};

static void
gf100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x80000000);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

static void
gf100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x00000000);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

const struct nvkm_event_func
gf100_fifo_nonstall = {
	.init = gf100_fifo_nonstall_allow,
	.fini = gf100_fifo_nonstall_block,
};

static const struct nvkm_enum
gf100_fifo_mmu_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO" },
	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
	{ 0x17, "PMU" },
	{}
};

static const struct nvkm_enum
gf100_fifo_mmu_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_mmu_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PMU" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_mmu_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

const struct nvkm_enum
gf100_fifo_mmu_fault_access[] = {
	{ 0x00, "READ" },
	{ 0x01, "WRITE" },
	{}
};

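/* MMU fault handler.  Faults deliberately raised by CTXSW_TIMEOUT recovery
 * are handed straight to per-engine recovery; anything else is decoded and
 * logged, the relevant BAR is reset if it was the source, and the channel
 * group owning the faulting instance is recovered.
 */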
void
gf100_fifo_mmu_fault_recover(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	struct nvkm_chan *chan;
	unsigned long flags;
	char ct[8] = "HUB/";

	/* Lookup engine by MMU fault ID. */
	nvkm_runl_foreach(runl, fifo) {
		engn = nvkm_runl_find_engn(engn, runl, engn->fault == info->engine);
		if (engn) {
			/* Fault triggered by CTXSW_TIMEOUT recovery procedure. */
			if (engn->func->mmu_fault_triggered &&
			    engn->func->mmu_fault_triggered(engn)) {
				nvkm_runl_rc_engn(runl, engn);
				return;
			}

			engine = engn->engine;
			break;
		}
	}

	er = nvkm_enum_find(fifo->func->mmu_fault->reason, info->reason);
	ee = nvkm_enum_find(fifo->func->mmu_fault->engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->mmu_fault->hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->mmu_fault->gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->mmu_fault->access, info->access);

	/* Handle BAR faults. */
	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_bar_bar1_reset(device);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_bar_bar2_reset(device);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			break;
		}
	}

	chan = nvkm_chan_get_inst(&fifo->engine, info->inst, &flags);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : engine ? engine->subdev.name : "",
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "",
		   chan ? chan->id : -1, info->inst, chan ? chan->name : "unknown");

	/* Handle host/engine faults. */
	if (chan)
		nvkm_runl_rc_cgrp(chan->cgrp);

	nvkm_chan_put(&chan, flags);
}

static const struct nvkm_fifo_func_mmu_fault
gf100_fifo_mmu_fault = {
	.recover = gf100_fifo_mmu_fault_recover,
	.access = gf100_fifo_mmu_fault_access,
	.engine = gf100_fifo_mmu_fault_engine,
	.reason = gf100_fifo_mmu_fault_reason,
	.hubclient = gf100_fifo_mmu_fault_hubclient,
	.gpcclient = gf100_fifo_mmu_fault_gpcclient,
};

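/* CTXSW_TIMEOUT recovery: for each engine that timed out, determine which
 * channel (group) it is stuck on and trigger an MMU fault on every engine
 * running that same channel (group), so the normal fault path above can
 * recover it.
 */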
void
gf100_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo, u32 engm)
{
	struct nvkm_runl *runl;
	struct nvkm_engn *engn, *engn2;
	bool cgid, cgid2;
	int id, id2;

	nvkm_runl_foreach(runl, fifo) {
		/* Stop the runlist, and go through all engines serving it. */
		nvkm_runl_block(runl);
		nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id)) {
			/* Determine what channel (group) the engine is on. */
			id = engn->func->cxid(engn, &cgid);
			if (id >= 0) {
				/* Trigger MMU fault on any engine(s) on that channel (group). */
				nvkm_runl_foreach_engn_cond(engn2, runl, engn2->func->cxid) {
					id2 = engn2->func->cxid(engn2, &cgid2);
					if (cgid2 == cgid && id2 == id)
						engn2->func->mmu_fault_trigger(engn2);
				}
			}
		}
		nvkm_runl_allow(runl); /* HW will keep runlist blocked via ERROR_SCHED_DISABLE. */
	}
}

static void
gf100_fifo_intr_sched_ctxsw(struct nvkm_fifo *fifo)
{
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	u32 engm = 0;

	/* Look for any engines that are busy, and awaiting chsw ack. */
	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn_cond(engn, runl, engn->func->chsw) {
			if (WARN_ON(engn->fault < 0) || !engn->func->chsw(engn))
				continue;

			engm |= BIT(engn->id);
		}
	}

	if (!engm)
		return;

	fifo->func->intr_ctxsw_timeout(fifo, engm);
}

static const struct nvkm_enum
gf100_fifo_intr_sched_names[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

void
gf100_fifo_intr_sched(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_intr_sched_names, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

void
gf100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	struct nvkm_fault_data info;

	info.inst   =  (u64)inst << 12;
	info.addr   = ((u64)vahi << 32) | valo;
	info.time   = 0;
	info.engine = unit;
	info.valid  = 1;
	info.gpc    = (type & 0x1f000000) >> 24;
	info.client = (type & 0x00001f00) >> 8;
	info.access = (type & 0x00000080) >> 7;
	info.hub    = (type & 0x00000040) >> 6;
	info.reason = (type & 0x0000000f);

	nvkm_fifo_fault(fifo, &info);
}

void
gf100_fifo_intr_mmu_fault(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	unsigned long mask = nvkm_rd32(device, 0x00259c);
	int unit;

	for_each_set_bit(unit, &mask, 32) {
		fifo->func->intr_mmu_fault_unit(fifo, unit);
		nvkm_wr32(device, 0x00259c, BIT(unit));
	}
}

bool
gf100_fifo_intr_pbdma(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_runq *runq;
	u32 mask = nvkm_rd32(device, 0x0025a0);
	bool handled = false;

	nvkm_runq_foreach_cond(runq, fifo, mask & BIT(runq->id)) {
		if (runq->func->intr(runq, NULL))
			handled = true;

		nvkm_wr32(device, 0x0025a0, BIT(runq->id));
	}

	return handled;
}

static void
gf100_fifo_intr_runlist(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct nvkm_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x\n", engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);

	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

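/* Top-level PFIFO interrupt dispatch, keyed off 0x002100/0x002140:
 * 0x00000100 is SCHED_ERROR, 0x10000000 MMU fault, 0x20000000 PBDMA,
 * 0x40000000 runlist events and 0x80000000 engine (non-stall) events; the
 * remaining handled bits are only logged, and anything unexpected is logged
 * and masked off to avoid an interrupt storm.
 */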
static irqreturn_t
gf100_fifo_intr(struct nvkm_inth *inth)
{
	struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		gf100_fifo_intr_mmu_fault(fifo);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		if (gf100_fifo_intr_pbdma(fifo))
			stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		spin_lock(&fifo->lock);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		spin_unlock(&fifo->lock);
		nvkm_wr32(device, 0x002100, stat);
	}

	return IRQ_HANDLED;
}

static void
gf100_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
{
	struct nvkm_device *device = fifo->engine.subdev.device;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, mask);
	nvkm_wr32(device, 0x002204, mask);

	/* Assign engines to PBDMAs. */
	if ((mask & 7) == 7) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
}

static void
gf100_fifo_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static int
gf100_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_runl *runl;

	runl = nvkm_runl_new(fifo, 0, 0, 0);
	if (IS_ERR(runl))
		return PTR_ERR(runl);

	nvkm_runl_add(runl,  0, fifo->func->engn, NVKM_ENGINE_GR, 0);
	nvkm_runl_add(runl,  1, fifo->func->engn, NVKM_ENGINE_MSPDEC, 0);
	nvkm_runl_add(runl,  2, fifo->func->engn, NVKM_ENGINE_MSPPP, 0);
	nvkm_runl_add(runl,  3, fifo->func->engn, NVKM_ENGINE_MSVLD, 0);
	nvkm_runl_add(runl,  4, fifo->func->engn, NVKM_ENGINE_CE, 0);
	nvkm_runl_add(runl,  5, fifo->func->engn, NVKM_ENGINE_CE, 1);
	nvkm_runl_add(runl, 15,   &gf100_engn_sw, NVKM_ENGINE_SW, 0);
	return 0;
}

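/* Count PBDMA units by writing all-ones to the enable mask at 0x000204 and
 * seeing which bits stick.  nvkm_mask() returns the register's previous
 * contents, so the second call restores the original value while telling us
 * what actually stuck.
 */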
int
gf100_fifo_runq_nr(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	u32 save;

	/* Determine number of PBDMAs by checking valid enable bits. */
	save = nvkm_mask(device, 0x000204, 0xffffffff, 0xffffffff);
	save = nvkm_mask(device, 0x000204, 0xffffffff, save);
	return hweight32(save);
}

int
gf100_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
{
	return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr, &fifo->chid);
}

static const struct nvkm_fifo_func
gf100_fifo = {
	.chid_nr = nv50_fifo_chid_nr,
	.chid_ctor = gf100_fifo_chid_ctor,
	.runq_nr = gf100_fifo_runq_nr,
	.runl_ctor = gf100_fifo_runl_ctor,
	.init = gf100_fifo_init,
	.init_pbdmas = gf100_fifo_init_pbdmas,
	.intr = gf100_fifo_intr,
	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
	.intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
	.mmu_fault = &gf100_fifo_mmu_fault,
	.nonstall = &gf100_fifo_nonstall,
	.runl = &gf100_runl,
	.runq = &gf100_runq,
	.engn = &gf100_engn,
	.cgrp = {{                            }, &nv04_cgrp },
	.chan = {{ 0, 0, FERMI_CHANNEL_GPFIFO }, &gf100_chan },
};

int
gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	return nvkm_fifo_new_(&gf100_fifo, device, type, inst, pfifo);
}