/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <nvif/push006c.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/if0020.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_svm.h"

MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

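/* Mark the channel as dead and fail all of its outstanding fences with
 * -ENODEV, so that waiters are unblocked rather than stalling forever on
 * a channel that will never advance.
 */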
void
nouveau_channel_kill(struct nouveau_channel *chan)
{
	atomic_set(&chan->killed, 1);
	if (chan->fence)
		nouveau_fence_context_kill(chan->fence, -ENODEV);
}

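/* NVIF event handler, invoked when the host reports that the channel has
 * been killed (e.g. after a fault).  Tears down fence state via
 * nouveau_channel_kill() if that hasn't happened already, then drops the
 * event so no further notifications are delivered.
 */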
static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
	struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
	struct nouveau_cli *cli = (void *)chan->user.client;

	NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);

	if (unlikely(!atomic_read(&chan->killed)))
		nouveau_channel_kill(chan);

	return NVIF_EVENT_DROP;
}

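/* Emit a fence on the channel and block until it signals, i.e. until all
 * previously submitted work has completed.  A killed (or fence-less)
 * channel is treated as already idle.
 */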
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
		struct nouveau_cli *cli = (void *)chan->user.client;
		struct nouveau_fence *fence = NULL;
		int ret;

		ret = nouveau_fence_new(&fence, chan);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret) {
			NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
				  chan->chid, nvxx_client(&cli->base)->name);
			return ret;
		}
	}
	return 0;
}

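/* Destroy a channel and everything hanging off it, in roughly the reverse
 * order of creation: fence context, SVM binding, the per-channel nvif
 * objects, the kill-event notifier, USERD, and finally the push buffer
 * itself (unmapped, unpinned and unreferenced).
 */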
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		struct nouveau_cli *cli = (void *)chan->user.client;

		if (chan->fence)
			nouveau_fence(chan->drm)->context_del(chan);

		if (cli)
			nouveau_svmm_part(chan->vmm->svmm, chan->inst);

		nvif_object_dtor(&chan->blit);
		nvif_object_dtor(&chan->nvsw);
		nvif_object_dtor(&chan->gart);
		nvif_object_dtor(&chan->vram);
		nvif_event_dtor(&chan->kill);
		nvif_object_dtor(&chan->user);
		nvif_mem_dtor(&chan->mem_userd);
		nvif_object_dtor(&chan->push.ctxdma);
		nouveau_vma_del(&chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->bo.pin_count)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);
	}
	*pchan = NULL;
}

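/* nvif_push 'kick' hook: fold the data written through the push interface
 * into the channel's DMA bookkeeping, then submit it to the hardware
 * fetcher via FIRE_RING and reset the push window.
 */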
static void
nouveau_channel_kick(struct nvif_push *push)
{
	struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
	chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
	FIRE_RING(chan);
	chan->chan._push.bgn = chan->chan._push.cur;
}

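/* nvif_push 'wait' hook: account for any pending writes, wait for 'size'
 * dwords of pushbuf space via RING_SPACE, then re-point the push cursors
 * at the newly available region of the mapped buffer.
 */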
static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
	struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
	int ret;
	chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
	ret = RING_SPACE(chan, size);
	if (ret == 0) {
		chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
		chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
		chan->chan._push.cur = chan->chan._push.bgn;
		chan->chan._push.end = chan->chan._push.bgn + size;
	}
	return ret;
}

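/* First-stage channel setup: allocate the channel struct and its DMA push
 * buffer, wire up the nvif_push interface, and (pre-Fermi only) create the
 * ctxdma through which host fetches from the pushbuf's address space.
 */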
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->device = device;
	chan->drm = drm;
	chan->vmm = nouveau_cli_vmm(cli);
	atomic_set(&chan->killed, 0);

	/* allocate memory for dma push buffer */
	target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
	if (nouveau_vram_pushbuf)
		target = NOUVEAU_GEM_DOMAIN_VRAM;

	ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
			    &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target, false);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	chan->chan._push.mem.object.parent = cli->base.object.parent;
	chan->chan._push.mem.object.client = &cli->base;
	chan->chan._push.mem.object.name = "chanPush";
	chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
	chan->chan._push.wait = nouveau_channel_wait;
	chan->chan._push.kick = nouveau_channel_kick;
	chan->chan.push = &chan->chan._push;

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in; this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.addr = chan->push.buffer->offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
				      &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		chan->push.addr = chan->push.vma->addr;

		if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
			return 0;

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = chan->vmm->vmm.limit - 1;
	} else
	if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack: retarget to its location
			 * in the framebuffer BAR rather than direct vram
			 * access.  It's unclear why this is needed; it came
			 * from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nvxx_device(device)->func->
				resource_addr(nvxx_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}
	}

	ret = nvif_object_ctor(&device->object, "abi16PushCtxDma", 0,
			       NV_DMA_FROM_MEMORY, &args, sizeof(args),
			       &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}

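/* Second-stage setup: probe for the newest channel class the device
 * exposes (the hosts[] list is ordered newest-first), size the pushbuf
 * (NV50-style GPFIFO classes get an indirect-buffer ring appended after
 * it), allocate USERD where required (Volta and newer), and create the
 * channel object itself, named after the owning process.
 */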
static int
nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
		     struct nouveau_channel **pchan)
{
	const struct nvif_mclass hosts[] = {
		{  AMPERE_CHANNEL_GPFIFO_B, 0 },
		{  AMPERE_CHANNEL_GPFIFO_A, 0 },
		{  TURING_CHANNEL_GPFIFO_A, 0 },
		{   VOLTA_CHANNEL_GPFIFO_A, 0 },
		{  PASCAL_CHANNEL_GPFIFO_A, 0 },
		{ MAXWELL_CHANNEL_GPFIFO_A, 0 },
		{  KEPLER_CHANNEL_GPFIFO_B, 0 },
		{  KEPLER_CHANNEL_GPFIFO_A, 0 },
		{   FERMI_CHANNEL_GPFIFO  , 0 },
		{     G82_CHANNEL_GPFIFO  , 0 },
		{    NV50_CHANNEL_GPFIFO  , 0 },
		{    NV40_CHANNEL_DMA     , 0 },
		{    NV17_CHANNEL_DMA     , 0 },
		{    NV10_CHANNEL_DMA     , 0 },
		{    NV03_CHANNEL_DMA     , 0 },
		{}
	};
	struct {
		struct nvif_chan_v0 chan;
		char name[TASK_COMM_LEN+16];
	} args;
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nouveau_channel *chan;
	const u64 plength = 0x10000;
	const u64 ioffset = plength;
	const u64 ilength = 0x02000;
	char name[TASK_COMM_LEN];
	int cid, ret;
	u64 size;

	cid = nvif_mclass(&device->object, hosts);
	if (cid < 0)
		return cid;

	if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO)
		size = plength;
	else
		size = ioffset + ilength;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, size, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.chan.version = 0;
	args.chan.namelen = sizeof(args.name);
	args.chan.runlist = __ffs64(runm);
	args.chan.runq = 0;
	args.chan.priv = priv;
	args.chan.devm = BIT(0);
	if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
		args.chan.vmm = 0;
		args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
		args.chan.offset = chan->push.addr;
		args.chan.length = 0;
	} else {
		args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
		if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
			args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
		else
			args.chan.ctxdma = 0;
		args.chan.offset = ioffset + chan->push.addr;
		args.chan.length = ilength;
	}
	args.chan.huserd = 0;
	args.chan.ouserd = 0;

	/* allocate userd */
	if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
		ret = nvif_mem_ctor(&cli->mmu, "abi16ChanUSERD", NVIF_CLASS_MEM_GF100,
				    NVIF_MEM_VRAM | NVIF_MEM_COHERENT | NVIF_MEM_MAPPABLE,
				    0, PAGE_SIZE, NULL, 0, &chan->mem_userd);
		if (ret)
			return ret;

		args.chan.huserd = nvif_handle(&chan->mem_userd.object);
		args.chan.ouserd = 0;

		chan->userd = &chan->mem_userd.object;
	} else {
		chan->userd = &chan->user;
	}

	get_task_comm(name, current);
	snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));

	ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
			       &args, sizeof(args), &chan->user);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	chan->runlist = args.chan.runlist;
	chan->chid = args.chan.chid;
	chan->inst = args.chan.inst;
	chan->token = args.chan.token;
	return 0;
}

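/* Final-stage setup: map USERD, request kill notifications (Fermi and
 * newer), create the vram/gart ctxdmas used by pre-Fermi ABI16 clients,
 * seed the DMA tracking state, and attach a fence context.
 */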
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_drm *drm = chan->drm;
	struct nv_dma_v0 args = {};
	int ret, i;

	ret = nvif_object_map(chan->userd, NULL, 0);
	if (ret)
		return ret;

	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
		struct {
			struct nvif_event_v0 base;
			struct nvif_chan_event_v0 host;
		} args;

		args.host.version = 0;
		args.host.type = NVIF_CHAN_EVENT_V0_KILLED;

		ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
				      nouveau_channel_killed, false,
				      &args.base, sizeof(args), &chan->kill);
		if (ret == 0)
			ret = nvif_event_allow(&chan->kill);
		if (ret) {
			NV_ERROR(drm, "Failed to request channel kill "
				      "notification: %d\n", ret);
			return ret;
		}
	}

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
				       NV_DMA_IN_MEMORY, &args, sizeof(args),
				       &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}

		ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
				       NV_DMA_IN_MEMORY, &args, sizeof(args),
				       &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->user.oclass) {
	case NV03_CHANNEL_DMA:
	case NV10_CHANNEL_DMA:
	case NV17_CHANNEL_DMA:
	case NV40_CHANNEL_DMA:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = NV50_DMA_IB_MAX;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		PUSH_DATA(chan->chan.push, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
				       NVIF_CLASS_SW_NV04,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		ret = PUSH_WAIT(chan->chan.push, 2);
		if (ret)
			return ret;

		PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
		PUSH_KICK(chan->chan.push);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}

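/* Allocate and fully initialise a channel on a runlist selected from
 * 'runm'.  'vram' and 'gart' are the ABI16 ctxdma handles to instantiate
 * for pre-Fermi userspace.  On success the channel is also joined to the
 * client's SVM address space, if it has one.
 */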
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	int ret;

	ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
	if (ret) {
		NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
		return ret;
	}

	ret = nouveau_channel_init(*pchan, vram, gart);
	if (ret) {
		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
		return ret;
	}

	ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
	if (ret)
		nouveau_channel_del(pchan);

	return ret;
}

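/* Tear down the per-runlist bookkeeping allocated by nouveau_channels_init(). */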
void
nouveau_channels_fini(struct nouveau_drm *drm)
{
	kfree(drm->runl);
}

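/* Query the device for its channel/runlist topology and allocate fence
 * context numbers: one range per runlist when the host reports per-runlist
 * channel counts (chan_nr == 0), otherwise a single range shared across
 * all runlists.
 */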
int
nouveau_channels_init(struct nouveau_drm *drm)
{
	struct {
		struct nv_device_info_v1 m;
		struct {
			struct nv_device_info_v1_data channels;
			struct nv_device_info_v1_data runlists;
		} v;
	} args = {
		.m.version = 1,
		.m.count = sizeof(args.v) / sizeof(args.v.channels),
		.v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
		.v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
	};
	struct nvif_object *device = &drm->client.device.object;
	int ret, i;

	ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
	if (ret ||
	    args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
	    args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
		return -ENODEV;

	drm->chan_nr = drm->chan_total = args.v.channels.data;
	drm->runl_nr = fls64(args.v.runlists.data);
	drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
	if (!drm->runl)
		return -ENOMEM;

	if (drm->chan_nr == 0) {
		for (i = 0; i < drm->runl_nr; i++) {
			if (!(args.v.runlists.data & BIT(i)))
				continue;

			args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
			args.v.channels.data = i;

			ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
			if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
				return -ENODEV;

			drm->runl[i].chan_nr = args.v.channels.data;
			drm->runl[i].chan_id_base = drm->chan_total;
			drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);

			drm->chan_total += drm->runl[i].chan_nr;
		}
	} else {
		drm->runl[0].context_base = dma_fence_context_alloc(drm->chan_nr);
		for (i = 1; i < drm->runl_nr; i++)
			drm->runl[i].context_base = drm->runl[0].context_base;
	}

	return 0;
}