/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"

#include <core/gpuobj.h>
#include <subdev/gsp.h>
#include <subdev/mmu.h>
#include <subdev/vfn.h>
#include <engine/gr.h>

#include <nvhw/drf.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>

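/* The doorbell handle (work submission token) for a GSP-RM channel packs the
 * runlist ID into the upper 16 bits and the channel ID into the lower 16 bits.
 */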
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
	return (chan->cgrp->runl->id << 16) | chan->id;
}

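/* Channel scheduling is controlled by GSP-RM (enabled via GPFIFO_SCHEDULE in
 * r535_chan_ramfc_write()), so the host-side start/stop hooks are no-ops.
 */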
static void
r535_chan_stop(struct nvkm_chan *chan)
{
}

static void
r535_chan_start(struct nvkm_chan *chan)
{
}

static void
r535_chan_ramfc_clear(struct nvkm_chan *chan)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;

	nvkm_gsp_rm_free(&chan->rm.object);

	dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
			  chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);

	nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
}

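/* Number of channels whose USERD is packed into a single USERD page/buffer;
 * see the CHANNEL_USERD_INDEX_PAGE/INDEX flags in r535_chan_ramfc_write() and
 * the sub-allocation of client-provided USERD in r535_chan_id_get_locked().
 */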
#define CHID_PER_USERD 8

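/* Rather than writing RAMFC directly, describe the channel (GPFIFO location,
 * USERD, instance memory, fault method buffer) to GSP-RM via an
 * NV_CHANNELGPFIFO allocation, then bind it to an engine and enable
 * scheduling with NVA06F controls.
 */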
static int
r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	struct nvkm_engn *engn;
	struct nvkm_device *device = fifo->engine.subdev.device;
	NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
	const int userd_p = chan->id / CHID_PER_USERD;
	const int userd_i = chan->id % CHID_PER_USERD;
	u32 eT = ~0;
	int ret;

	if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
		ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
		if (ret)
			return ret;
	}

	nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
		eT = engn->id;
		break;
	}

	if (WARN_ON(eT == ~0))
		return -EINVAL;

	chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
						  fifo->rm.mthdbuf_size,
						  &chan->rm.mthdbuf.addr, GFP_KERNEL);
	if (!chan->rm.mthdbuf.ptr)
		return -ENOMEM;

	args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
				     fifo->func->chan.user.oclass, sizeof(*args),
				     &chan->rm.object);
	if (WARN_ON(IS_ERR(args)))
		return PTR_ERR(args);

	args->gpFifoOffset = offset;
	args->gpFifoEntries = length / 8;

	args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
	args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
	args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
	if (!priv)
		args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
	else
		args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
	args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);

	args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
	args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);

	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
	args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
	args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);

	args->hVASpace = chan->vmm->rm.object.handle;
	args->engineType = eT;

	args->instanceMem.base = chan->inst->addr;
	args->instanceMem.size = chan->inst->size;
	args->instanceMem.addressSpace = 2;
	args->instanceMem.cacheAttrib = 1;

	args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
	args->userdMem.size = fifo->func->chan.func->userd->size;
	args->userdMem.addressSpace = 2;
	args->userdMem.cacheAttrib = 1;

	args->ramfcMem.base = chan->inst->addr + 0;
	args->ramfcMem.size = 0x200;
	args->ramfcMem.addressSpace = 2;
	args->ramfcMem.cacheAttrib = 1;

	args->mthdbufMem.base = chan->rm.mthdbuf.addr;
	args->mthdbufMem.size = fifo->rm.mthdbuf_size;
	args->mthdbufMem.addressSpace = 1;
	args->mthdbufMem.cacheAttrib = 0;

	if (!priv)
		args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
	else
		args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
	args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
	args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);

	ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
	if (ret)
		return ret;

	if (1) {
		NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;

		if (1) {
			NVA06F_CTRL_BIND_PARAMS *ctrl;

			ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
						    NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
			if (WARN_ON(IS_ERR(ctrl)))
				return PTR_ERR(ctrl);

			ctrl->engineType = eT;

			ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
			if (ret)
				return ret;
		}

		ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
					    NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
		if (WARN_ON(IS_ERR(ctrl)))
			return PTR_ERR(ctrl);

		ctrl->bEnable = 1;
		ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
	}

	return ret;
}

static const struct nvkm_chan_func_ramfc
r535_chan_ramfc = {
	.write = r535_chan_ramfc_write,
	.clear = r535_chan_ramfc_clear,
	.devm = 0xfff,
	.priv = true,
};

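/* Tracks one shared USERD allocation; up to CHID_PER_USERD channels
 * sub-allocate their USERD slots from it.
 */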
struct r535_chan_userd {
	struct nvkm_memory *mem;
	struct nvkm_memory *map;
	int chid;
	u32 used;

	struct list_head head;
};

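/* Release this channel's slot in its shared USERD buffer; once the last slot
 * is released, drop the memory references and return the channel-ID block.
 */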
static void
r535_chan_id_put(struct nvkm_chan *chan)
{
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_fifo *fifo = runl->fifo;
	struct r535_chan_userd *userd;

	mutex_lock(&fifo->userd.mutex);
	list_for_each_entry(userd, &fifo->userd.list, head) {
		if (userd->map == chan->userd.mem) {
			u32 chid = chan->userd.base / chan->func->userd->size;

			userd->used &= ~BIT(chid);
			if (!userd->used) {
				nvkm_memory_unref(&userd->map);
				nvkm_memory_unref(&userd->mem);
				nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
				list_del(&userd->head);
				kfree(userd);
			}

			break;
		}
	}
	mutex_unlock(&fifo->userd.mutex);
}

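/* Look up (or create) the shared USERD tracking entry for the client-provided
 * USERD memory, claim the slot selected by the USERD offset, and return the
 * resulting channel ID (one CHID_PER_USERD-sized block of IDs per entry).
 */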
static int
r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
{
	const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
	struct nvkm_runl *runl = chan->cgrp->runl;
	struct nvkm_fifo *fifo = runl->fifo;
	struct r535_chan_userd *userd;
	u32 chid;
	int ret;

	if (ouserd + chan->func->userd->size >= userd_size ||
	    (ouserd & (chan->func->userd->size - 1))) {
		RUNL_DEBUG(runl, "ouserd %llx", ouserd);
		return -EINVAL;
	}

	chid = div_u64(ouserd, chan->func->userd->size);

	list_for_each_entry(userd, &fifo->userd.list, head) {
		if (userd->mem == muserd) {
			if (userd->used & BIT(chid))
				return -EBUSY;
			break;
		}
	}

	if (&userd->head == &fifo->userd.list) {
		if (nvkm_memory_size(muserd) < userd_size) {
			RUNL_DEBUG(runl, "userd too small");
			return -EINVAL;
		}

		userd = kzalloc(sizeof(*userd), GFP_KERNEL);
		if (!userd)
			return -ENOMEM;

		userd->chid = nvkm_chid_get(runl->chid, chan);
		if (userd->chid < 0) {
			ret = userd->chid;
			kfree(userd);
			return ret;
		}

		userd->mem = nvkm_memory_ref(muserd);

		ret = nvkm_memory_kmap(userd->mem, &userd->map);
		if (ret) {
			nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
			kfree(userd);
			return ret;
		}

		list_add(&userd->head, &fifo->userd.list);
	}

	userd->used |= BIT(chid);

	chan->userd.mem = nvkm_memory_ref(userd->map);
	chan->userd.base = ouserd;

	return (userd->chid * CHID_PER_USERD) + chid;
}

static int
r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
{
	struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
	int ret;

	mutex_lock(&fifo->userd.mutex);
	ret = r535_chan_id_get_locked(chan, muserd, ouserd);
	mutex_unlock(&fifo->userd.mutex);
	return ret;
}

static const struct nvkm_chan_func
r535_chan = {
	.id_get = r535_chan_id_get,
	.id_put = r535_chan_id_put,
	.inst = &gf100_chan_inst,
	.userd = &gv100_chan_userd,
	.ramfc = &r535_chan_ramfc,
	.start = r535_chan_start,
	.stop = r535_chan_stop,
	.doorbell_handle = r535_chan_doorbell_handle,
};

static const struct nvkm_cgrp_func
r535_cgrp = {
};

static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
	struct nvkm_subdev *subdev = &engn->engine->subdev;
	int ret;

	ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
	WARN_ON(ret == -ENOENT);
	return ret;
}

static const struct nvkm_engn_func
r535_ce = {
	.nonstall = r535_engn_nonstall,
};

static int
r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
	/* RM requires GR context buffers to remain mapped until after the
	 * channel has been destroyed (as opposed to after the last gr obj
	 * has been deleted).
	 *
	 * Take an extra ref here, which will be released once the channel
	 * object has been deleted.
	 */
	refcount_inc(&vctx->refs);
	chan->rm.grctx = vctx;
	return 0;
}

static const struct nvkm_engn_func
r535_gr = {
	.nonstall = r535_engn_nonstall,
	.ctor2 = r535_gr_ctor,
};

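/* Promote the falcon engine context buffer to RM, telling it where the
 * context lives in the channel's virtual address space.
 */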
static int
r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
	struct nvkm_gsp_client *client = &chan->vmm->rm.client;
	NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
				    NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->hClient = client->object.handle;
	ctrl->hObject = chan->rm.object.handle;
	ctrl->hChanClient = client->object.handle;
	ctrl->virtAddress = vctx->vma->addr;
	ctrl->size = vctx->inst->size;
	ctrl->engineType = engn->id;
	ctrl->ChID = chan->id;

	return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
}

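/* Allocate an engine context buffer of the size reported by RM (see
 * r535_fifo_ectx_size()), map it into the channel's VMM, then promote it.
 */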
static int
r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
{
	int ret;

	if (WARN_ON(!engn->rm.size))
		return -EINVAL;

	ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
			      &vctx->inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
	if (ret)
		return ret;

	ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
	if (ret)
		return ret;

	return r535_flcn_bind(engn, vctx, chan);
}

static const struct nvkm_engn_func
r535_flcn = {
	.nonstall = r535_engn_nonstall,
	.ctor2 = r535_flcn_ctor,
};

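/* Runlist blocking/allowing is managed by GSP-RM, so these are stubs. */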
static void
r535_runl_allow(struct nvkm_runl *runl, u32 engm)
{
}

static void
r535_runl_block(struct nvkm_runl *runl, u32 engm)
{
}

static const struct nvkm_runl_func
r535_runl = {
	.block = r535_runl_block,
	.allow = r535_runl_allow,
};

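/* Map an NVKM engine type/instance to the NV2080_ENGINE_TYPE value used as
 * the engine ID when registering it with nvkm_runl_add().
 */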
static int
r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
{
	switch (type) {
	case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
	case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
	case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
	case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
	case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
	case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
	case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
	case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

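/* Translate an RM_ENGINE_TYPE from RM's device info table into an NVKM
 * subdev type; the return value is the instance number, or -EINVAL.
 */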
static int
r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
{
	switch (rm) {
	case RM_ENGINE_TYPE_GR0:
		*ptype = NVKM_ENGINE_GR;
		return 0;
	case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
		*ptype = NVKM_ENGINE_CE;
		return rm - RM_ENGINE_TYPE_COPY0;
	case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
		*ptype = NVKM_ENGINE_NVDEC;
		return rm - RM_ENGINE_TYPE_NVDEC0;
	case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
		*ptype = NVKM_ENGINE_NVENC;
		return rm - RM_ENGINE_TYPE_NVENC0;
	case RM_ENGINE_TYPE_SW:
		*ptype = NVKM_ENGINE_SW;
		return 0;
	case RM_ENGINE_TYPE_SEC2:
		*ptype = NVKM_ENGINE_SEC2;
		return 0;
	case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
		*ptype = NVKM_ENGINE_NVJPG;
		return rm - RM_ENGINE_TYPE_NVJPEG0;
	case RM_ENGINE_TYPE_OFA:
		*ptype = NVKM_ENGINE_OFA;
		return 0;
	default:
		return -EINVAL;
	}
}

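/* Ask RM which falcons it has constructed, and record each matching engine's
 * context buffer size for r535_flcn_ctor() to allocate later.
 */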
static int
r535_fifo_ectx_size(struct nvkm_fifo *fifo)
{
	NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
	struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
				   sizeof(*ctrl));
	if (WARN_ON(IS_ERR(ctrl)))
		return PTR_ERR(ctrl);

	for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
		nvkm_runl_foreach(runl, fifo) {
			nvkm_runl_foreach_engn(engn, runl) {
				if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
					engn->rm.size =
						ctrl->constructedFalconsTable[i].ctxBufferSize;
					break;
				}
			}
		}
	}

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	return 0;
}

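/* Build the runlist/engine topology from RM's device info table, then fetch
 * the CE fault method buffer size needed for per-channel allocations.
 */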
static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->engine.subdev;
	struct nvkm_gsp *gsp = subdev->device->gsp;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	u32 cgids = 2048;
	u32 chids = 2048;
	int ret;
	NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;

	if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
	    (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
	if (WARN_ON(IS_ERR(ctrl)))
		return PTR_ERR(ctrl);

	for (int i = 0; i < ctrl->numEntries; i++) {
		const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
		const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];

		runl = nvkm_runl_get(fifo, id, addr);
		if (!runl) {
			runl = nvkm_runl_new(fifo, id, addr, 0);
			if (WARN_ON(IS_ERR(runl)))
				continue;
		}
	}

	for (int i = 0; i < ctrl->numEntries; i++) {
		const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
		const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
		const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
		enum nvkm_subdev_type type;
		int inst, nv2080;

		runl = nvkm_runl_get(fifo, id, addr);
		if (!runl)
			continue;

		inst = r535_fifo_engn_type(rmid, &type);
		if (inst < 0) {
			nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
			nvkm_runl_del(runl);
			continue;
		}

		nv2080 = r535_fifo_2080_type(type, inst);
		if (nv2080 < 0) {
			nvkm_runl_del(runl);
			continue;
		}

		switch (type) {
		case NVKM_ENGINE_CE:
			engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
			break;
		case NVKM_ENGINE_GR:
			engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
			break;
		case NVKM_ENGINE_NVDEC:
		case NVKM_ENGINE_NVENC:
		case NVKM_ENGINE_NVJPG:
		case NVKM_ENGINE_OFA:
			engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
			break;
		case NVKM_ENGINE_SW:
			continue;
		default:
			engn = NULL;
			break;
		}

		if (!engn) {
			nvkm_runl_del(runl);
			continue;
		}

		engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
	}

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);

	{
		NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;

		ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
					   NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
					   sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		fifo->rm.mthdbuf_size = ctrl->size;

		nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	}

	return r535_fifo_ectx_size(fifo);
}

static void
r535_fifo_dtor(struct nvkm_fifo *fifo)
{
	kfree(fifo->func);
}

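/* Build a FIFO implementation for GSP-RM by copying the HW function table's
 * cgrp/chan layout and overriding the hooks that must go through RM.
 */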
int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
	struct nvkm_fifo_func *rm;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_fifo_dtor;
	rm->runl_ctor = r535_fifo_runl_ctor;
	rm->runl = &r535_runl;
	rm->cgrp = hw->cgrp;
	rm->cgrp.func = &r535_cgrp;
	rm->chan = hw->chan;
	rm->chan.func = &r535_chan;
	rm->nonstall = &ga100_fifo_nonstall;
	rm->nonstall_ctor = ga100_fifo_nonstall_ctor;

	return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}