1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "chan.h"
26#include "hdmi.h"
27#include "head.h"
28#include "ior.h"
29#include "outp.h"
30
31#include <core/ramht.h>
32#include <subdev/timer.h>
33
34#include <nvif/class.h>
35
36static void
37gf119_sor_hda_device_entry(struct nvkm_ior *ior, int head)
38{
39	struct nvkm_device *device = ior->disp->engine.subdev.device;
40	const u32 hoff = 0x800 * head;
41
42	nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
43}
44
/* Upload an ELD (EDID-Like Data) buffer to the HDA codec for this SOR/head.
 * Each write to the data port carries (byte index << 8 | byte value); the
 * remainder up to 0x60 bytes is zero-filled, then the ELD-valid bits are set. */
void
gf119_sor_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	const u32 soff = 0x030 * ior->id + (head * 0x04);
	int i;

	/* Write the caller-supplied ELD bytes, one per register write. */
	for (i = 0; i < size; i++)
		nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | data[i]);
	/* Zero-pad the rest of the (apparently fixed 0x60-byte) buffer. */
	for (; i < 0x60; i++)
		nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
	/* Mark the ELD as present/valid -- NOTE(review): exact bit meanings
	 * of 0x80000002 are not documented here; inferred from usage. */
	nvkm_mask(device, 0x10ec10 + soff, 0x80000002, 0x80000002);
}
58
/* Signal HDA codec hotplug state for this SOR/head.
 * On plug: program the device entry, then set the presence bit.
 * On unplug: additionally clear the ELD-valid bit (0x00000002 is added to
 * the mask with the corresponding data bit left zero). */
void
gf119_sor_hda_hpd(struct nvkm_ior *ior, int head, bool present)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	const u32 soff = 0x030 * ior->id + (head * 0x04);
	u32 data = 0x80000000;
	u32 mask = 0x80000001;

	if (present) {
		/* Device entry must be programmed before asserting presence. */
		ior->func->hda->device_entry(ior, head);
		data |= 0x00000001;
	} else {
		mask |= 0x00000002;
	}

	nvkm_mask(device, 0x10ec10 + soff, mask, data);
}
76
/* HD-Audio hooks for GF119 SORs. */
const struct nvkm_ior_func_hda
gf119_sor_hda = {
	.hpd = gf119_sor_hda_hpd,
	.eld = gf119_sor_hda_eld,
	.device_entry = gf119_sor_hda_device_entry,
};
83
84void
85gf119_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
86{
87	struct nvkm_device *device = sor->disp->engine.subdev.device;
88	const u32 hoff = head * 0x800;
89
90	nvkm_mask(device, 0x616610 + hoff, 0x0800003f, 0x08000000 | watermark);
91}
92
93void
94gf119_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
95{
96	struct nvkm_device *device = sor->disp->engine.subdev.device;
97	const u32 hoff = head * 0x800;
98
99	nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, h);
100	nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, v);
101}
102
/* Enable/disable DP audio for a head, then wait (up to 2ms) for the
 * hardware to acknowledge by clearing bit 31 of the same register. */
void
gf119_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 hoff = 0x800 * head;
	/* Bit 31 triggers the update; bit 0 is the enable state. */
	const u32 data = 0x80000000 | (0x00000001 * enable);
	const u32 mask = 0x8000000d;

	nvkm_mask(device, 0x616618 + hoff, mask, data);
	/* Poll until the trigger bit self-clears (timeout is tolerated). */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x616618 + hoff) & 0x80000000))
			break;
	);
}
117
118void
119gf119_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
120{
121	struct nvkm_device *device = sor->disp->engine.subdev.device;
122	const u32 hoff = head * 0x800;
123
124	nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
125	nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
126}
127
/* Program per-lane DP drive parameters: dc (drive current/voltage swing),
 * pe (pre-emphasis), pc (post-cursor) and pu (power/current level).
 * Lane order is remapped through sor->func->dp->lanes[]. */
void
gf119_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32  loff = nv50_sor_link(sor);
	const u32 shift = sor->func->dp->lanes[ln] * 8;
	u32 data[4];

	/* Read-modify-write, clearing only this lane's byte field. */
	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	/* pu is shared (not per-lane): raise it if this lane needs more,
	 * or reset it unconditionally when programming lane 0. */
	if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (pu << 8);

	nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);

	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
	nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift));
}
149
150static void
151gf119_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
152{
153	struct nvkm_device *device = sor->disp->engine.subdev.device;
154	const u32 soff = nv50_ior_base(sor);
155	u32 data;
156
157	switch (pattern) {
158	case 0: data = 0x10101010; break;
159	case 1: data = 0x01010101; break;
160	case 2: data = 0x02020202; break;
161	case 3: data = 0x03030303; break;
162	default:
163		WARN_ON(1);
164		return;
165	}
166
167	nvkm_mask(device, 0x61c110 + soff, 0x1f1f1f1f, data);
168}
169
/* Configure DP link parameters: link bandwidth in the clock register,
 * lane-enable mask, MST and enhanced-framing flags in the link control
 * register.  The aux parameter is unused on this generation. */
int
gf119_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	const u32 loff = nv50_sor_link(sor);
	u32 dpctrl = 0x00000000;
	u32 clksor = 0x00000000;

	/* Link bandwidth code goes into bits 22:18. */
	clksor |= sor->dp.bw << 18;
	/* Lane-enable bits: (2^nr - 1) lanes, at bit 16. */
	dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
	if (sor->dp.mst)
		dpctrl |= 0x40000000;
	if (sor->dp.ef)
		dpctrl |= 0x00004000;

	nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
	nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
	return 0;
}
190
/* DisplayPort hooks for GF119 SORs; .lanes maps logical to physical lanes. */
const struct nvkm_ior_func_dp
gf119_sor_dp = {
	.lanes = { 2, 1, 0, 3 },
	.links = gf119_sor_dp_links,
	.power = g94_sor_dp_power,
	.pattern = gf119_sor_dp_pattern,
	.drive = gf119_sor_dp_drive,
	.vcpi = gf119_sor_dp_vcpi,
	.audio = gf119_sor_dp_audio,
	.audio_sym = gf119_sor_dp_audio_sym,
	.watermark = gf119_sor_dp_watermark,
};
203
/* Upload the HDMI Vendor-Specific InfoFrame for a head.  The frame is
 * first disabled; with size == 0 that's all (frame off), otherwise the
 * packed bytes are written and the frame re-enabled. */
static void
gf119_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	struct packed_hdmi_infoframe vsi;
	const u32 hoff = head * 0x800;

	pack_hdmi_infoframe(&vsi, data, size);

	/* Disable the packet before rewriting its contents. */
	nvkm_mask(device, 0x616730 + hoff, 0x00010001, 0x00010000);
	if (!size)
		return;

	/*
	 * These appear to be the audio infoframe registers,
	 * but no other set of infoframe registers has yet
	 * been found.
	 */
	nvkm_wr32(device, 0x616738 + hoff, vsi.header);
	nvkm_wr32(device, 0x61673c + hoff, vsi.subpack0_low);
	nvkm_wr32(device, 0x616740 + hoff, vsi.subpack0_high);
	/* Is there a second (or further?) set of subpack registers here? */

	/* Re-enable the packet. */
	nvkm_mask(device, 0x616730 + hoff, 0x00000001, 0x00000001);
}
229
/* Upload the HDMI AVI InfoFrame for a head.  Same disable/write/enable
 * dance as the VSI variant, but with two subpacks. */
static void
gf119_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	struct packed_hdmi_infoframe avi;
	const u32 hoff = head * 0x800;

	pack_hdmi_infoframe(&avi, data, size);

	/* Disable the packet before rewriting; size == 0 leaves it off. */
	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
	if (!size)
		return;

	nvkm_wr32(device, 0x61671c + hoff, avi.header);
	nvkm_wr32(device, 0x616720 + hoff, avi.subpack0_low);
	nvkm_wr32(device, 0x616724 + hoff, avi.subpack0_high);
	nvkm_wr32(device, 0x616728 + hoff, avi.subpack1_low);
	nvkm_wr32(device, 0x61672c + hoff, avi.subpack1_high);

	/* Re-enable the packet. */
	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
}
251
/* Master HDMI enable/disable for a head.  On disable, also turns off the
 * general-control, VSI and AVI infoframe packets. */
static void
gf119_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, u8 rekey)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	/* Bit 30 = enable, bits 20:16 = max_ac_packet, bits 6:0 = rekey. */
	const u32 ctrl = 0x40000000 * enable |
			 max_ac_packet << 16 |
			 rekey;
	const u32 hoff = head * 0x800;

	if (!(ctrl & 0x40000000)) {
		/* Disable HDMI and every packet type we manage. */
		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
		nvkm_mask(device, 0x616730 + hoff, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
		return;
	}

	/* ??? InfoFrame? */
	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
	nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);

	/* HDMI_CTRL */
	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
}
277
/* HDMI hooks for GF119 SORs. */
static const struct nvkm_ior_func_hdmi
gf119_sor_hdmi = {
	.ctrl = gf119_sor_hdmi_ctrl,
	.infoframe_avi = gf119_sor_hdmi_infoframe_avi,
	.infoframe_vsi = gf119_sor_hdmi_infoframe_vsi,
};
284
/* Program SOR clock dividers.  div1/div2 start out set only for link 3;
 * for TMDS the pixel-clock divider is set from the high-speed flag, and
 * high-speed TMDS also forces div2. */
void
gf119_sor_clock(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	u32 div1 = sor->asy.link == 3;
	u32 div2 = sor->asy.link == 3;

	if (sor->asy.proto == TMDS) {
		/* 0x14 vs 0x0a: divider code for >165MHz vs standard TMDS. */
		const u32 speed = sor->tmds.high_speed ? 0x14 : 0x0a;
		nvkm_mask(device, 0x612300 + soff, 0x007c0000, speed << 18);
		if (sor->tmds.high_speed)
			div2 = 1;
	}

	nvkm_mask(device, 0x612300 + soff, 0x00000707, (div2 << 8) | div1);
}
302
/* Read back SOR state (protocol, sublink, head mask) from the armed or
 * asserted control register, selected by which state struct is passed. */
void
gf119_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	/* Asserted state lives 0x20000 above the armed state. */
	const u32 coff = (state == &sor->asy) * 0x20000 + sor->id * 0x20;
	u32 ctrl = nvkm_rd32(device, 0x640200 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break;
	case 8: state->proto =   DP; state->link = 1; break;
	case 9: state->proto =   DP; state->link = 2; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	/* Low nibble is the mask of heads driving this SOR. */
	state->head = ctrl & 0x0000000f;
}
325
/* Top-level SOR hooks for GF119 (reuses NV50/GT215 where unchanged). */
static const struct nvkm_ior_func
gf119_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
	.bl = &gt215_sor_bl,
	.hdmi = &gf119_sor_hdmi,
	.dp = &gf119_sor_dp,
	.hda = &gf119_sor_hda,
};
336
/* Instantiate SOR #id using the GF119 function table (HDA-capable). */
static int
gf119_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&gf119_sor, disp, SOR, id, true);
}
342
/* Report the present-SOR bitmask (from the disp capabilities register)
 * and the maximum SOR count for this generation. */
int
gf119_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	*pmask = (nvkm_rd32(device, 0x612004) & 0x0000ff00) >> 8;
	return 8;
}
350
351static void
352gf119_dac_clock(struct nvkm_ior *dac)
353{
354	struct nvkm_device *device = dac->disp->engine.subdev.device;
355	const u32 doff = nv50_ior_base(dac);
356	nvkm_mask(device, 0x612280 + doff, 0x07070707, 0x00000000);
357}
358
/* Read back DAC state (protocol + head mask) from the armed or asserted
 * control register, selected by which state struct is passed. */
static void
gf119_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	/* Asserted state lives 0x20000 above the armed state. */
	const u32 coff = (state == &dac->asy) * 0x20000 + dac->id * 0x20;
	u32 ctrl = nvkm_rd32(device, 0x640180 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = CRT; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	/* Low nibble is the mask of heads driving this DAC. */
	state->head = ctrl & 0x0000000f;
}
376
/* Top-level DAC hooks for GF119 (power/sense inherited from NV50). */
static const struct nvkm_ior_func
gf119_dac = {
	.state = gf119_dac_state,
	.power = nv50_dac_power,
	.sense = nv50_dac_sense,
	.clock = gf119_dac_clock,
};
384
/* Instantiate DAC #id using the GF119 function table (no HDA). */
int
gf119_dac_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&gf119_dac, disp, DAC, id, false);
}
390
/* Report the present-DAC bitmask (from the disp capabilities register)
 * and the maximum DAC count for this generation. */
int
gf119_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	*pmask = (nvkm_rd32(device, 0x612004) & 0x000000f0) >> 4;
	return 4;
}
398
399static void
400gf119_head_vblank_put(struct nvkm_head *head)
401{
402	struct nvkm_device *device = head->disp->engine.subdev.device;
403	const u32 hoff = head->id * 0x800;
404	nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000000);
405}
406
407static void
408gf119_head_vblank_get(struct nvkm_head *head)
409{
410	struct nvkm_device *device = head->disp->engine.subdev.device;
411	const u32 hoff = head->id * 0x800;
412	nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000001);
413}
414
415void
416gf119_head_rgclk(struct nvkm_head *head, int div)
417{
418	struct nvkm_device *device = head->disp->engine.subdev.device;
419	nvkm_mask(device, 0x612200 + (head->id * 0x800), 0x0000000f, div);
420}
421
/* Read back head timing state (totals, sync, blanking, refresh rate and
 * output depth) from the armed or asserted register set. */
static void
gf119_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	/* Asserted state lives 0x20000 above the armed state. */
	const u32 hoff = (state == &head->asy) * 0x20000 + head->id * 0x300;
	u32 data;

	/* Each timing register packs vertical<<16 | horizontal. */
	data = nvkm_rd32(device, 0x640414 + hoff);
	state->vtotal = (data & 0xffff0000) >> 16;
	state->htotal = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x640418 + hoff);
	state->vsynce = (data & 0xffff0000) >> 16;
	state->hsynce = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x64041c + hoff);
	state->vblanke = (data & 0xffff0000) >> 16;
	state->hblanke = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x640420 + hoff);
	state->vblanks = (data & 0xffff0000) >> 16;
	state->hblanks = (data & 0x0000ffff);
	state->hz = nvkm_rd32(device, 0x640450 + hoff);

	/* Bits 9:6 encode output depth in bits-per-pixel. */
	data = nvkm_rd32(device, 0x640404 + hoff);
	switch ((data & 0x000003c0) >> 6) {
	case 6: state->or.depth = 30; break;
	case 5: state->or.depth = 24; break;
	case 2: state->or.depth = 18; break;
	case 0: state->or.depth = 18; break; /*XXX: "default" */
	default:
		state->or.depth = 18;
		WARN_ON(1);
		break;
	}
}
455
/* Head hooks for GF119 (raster position read reused from NV50). */
static const struct nvkm_head_func
gf119_head = {
	.state = gf119_head_state,
	.rgpos = nv50_head_rgpos,
	.rgclk = gf119_head_rgclk,
	.vblank_get = gf119_head_vblank_get,
	.vblank_put = gf119_head_vblank_put,
};
464
/* Instantiate head #id using the GF119 function table. */
int
gf119_head_new(struct nvkm_disp *disp, int id)
{
	return nvkm_head_new_(&gf119_head, disp, id);
}
470
/* Report the present-head bitmask and read the head count from the
 * top-level configuration register. */
int
gf119_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	*pmask = nvkm_rd32(device, 0x612004) & 0x0000000f;
	return nvkm_rd32(device, 0x022448);
}
478
479static void
480gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
481{
482	struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
483	struct nvkm_device *device = disp->engine.subdev.device;
484	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
485	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
486}
487
488static void
489gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
490{
491	struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
492	struct nvkm_device *device = disp->engine.subdev.device;
493	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
494	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
495}
496
/* Event hooks for display-channel completion notifications. */
const struct nvkm_event_func
gf119_disp_chan_uevent = {
	.init = gf119_disp_chan_uevent_init,
	.fini = gf119_disp_chan_uevent_fini,
};
502
503void
504gf119_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
505{
506	struct nvkm_device *device = chan->disp->engine.subdev.device;
507	const u32 mask = 0x00000001 << chan->chid.user;
508	if (!en) {
509		nvkm_mask(device, 0x610090, mask, 0x00000000);
510		nvkm_mask(device, 0x6100a0, mask, 0x00000000);
511	} else {
512		nvkm_mask(device, 0x6100a0, mask, mask);
513	}
514}
515
/* Shut down a PIO display channel: clear the enable bit, then poll (up
 * to 2ms) for the channel-state field to report idle; log on timeout. */
static void
gf119_disp_pioc_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
			break;
	) < 0) {
		/* Timeout is non-fatal on teardown; just report the state. */
		nvkm_error(subdev, "ch %d fini: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
	}
}
534
/* Bring up a PIO display channel: set the enable bit and poll (up to
 * 2ms) for the state field to report "active" (0x00010000).
 * Returns 0 on success, -EBUSY on timeout. */
static int
gf119_disp_pioc_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* activate channel */
	nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
		if ((tmp & 0x00030000) == 0x00010000)
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
558
/* Channel hooks for PIO (non-DMA) display channels (cursor, overlay-imm). */
const struct nvkm_disp_chan_func
gf119_disp_pioc_func = {
	.init = gf119_disp_pioc_init,
	.fini = gf119_disp_pioc_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
};
566
567int
568gf119_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
569{
570	return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
571				 chan->chid.user << 27 | 0x00000001);
572}
573
/* Shut down a DMA display channel: put it in a safe state, clear the
 * enable bits, wait (up to 2ms) for idle, and save the current PUT
 * pointer so it can be restored on resume. */
void
gf119_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* deactivate channel */
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		/* Timeout is non-fatal on teardown; just report the state. */
		nvkm_error(subdev, "ch %d fini: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
	}

	/* Save PUT for restoration by gf119_disp_dmac_init() on resume. */
	chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
595
/* Bring up a DMA display channel: program the pushbuffer, restore the
 * saved PUT pointer, enable the channel and wait (up to 2ms) for it to
 * finish initialising.  Returns 0 on success, -EBUSY on timeout. */
static int
gf119_disp_dmac_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
	nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
624
/* Channel hooks for DMA display channels (base, overlay). */
const struct nvkm_disp_chan_func
gf119_disp_dmac_func = {
	.push = nv50_disp_dmac_push,
	.init = gf119_disp_dmac_init,
	.fini = gf119_disp_dmac_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = gf119_disp_dmac_bind,
};
634
/* Cursor channel description (PIO, channel id 13). */
const struct nvkm_disp_chan_user
gf119_disp_curs = {
	.func = &gf119_disp_pioc_func,
	.ctrl = 13,
	.user = 13,
};
641
/* Overlay-immediate channel description (PIO, channel id 9). */
const struct nvkm_disp_chan_user
gf119_disp_oimm = {
	.func = &gf119_disp_pioc_func,
	.ctrl = 9,
	.user = 9,
};
648
/* Overlay channel method list: maps EVO method offsets to the 0x6650xx
 * register shadow, used for debug dumps of channel state. */
static const struct nvkm_disp_mthd_list
gf119_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.data = {
		{ 0x0080, 0x665080 },
		{ 0x0084, 0x665084 },
		{ 0x0088, 0x665088 },
		{ 0x008c, 0x66508c },
		{ 0x0090, 0x665090 },
		{ 0x0094, 0x665094 },
		{ 0x00a0, 0x6650a0 },
		{ 0x00a4, 0x6650a4 },
		{ 0x00b0, 0x6650b0 },
		{ 0x00b4, 0x6650b4 },
		{ 0x00b8, 0x6650b8 },
		{ 0x00c0, 0x6650c0 },
		{ 0x00e0, 0x6650e0 },
		{ 0x00e4, 0x6650e4 },
		{ 0x00e8, 0x6650e8 },
		{ 0x0100, 0x665100 },
		{ 0x0104, 0x665104 },
		{ 0x0108, 0x665108 },
		{ 0x010c, 0x66510c },
		{ 0x0110, 0x665110 },
		{ 0x0118, 0x665118 },
		{ 0x011c, 0x66511c },
		{ 0x0120, 0x665120 },
		{ 0x0124, 0x665124 },
		{ 0x0130, 0x665130 },
		{ 0x0134, 0x665134 },
		{ 0x0138, 0x665138 },
		{ 0x013c, 0x66513c },
		{ 0x0140, 0x665140 },
		{ 0x0144, 0x665144 },
		{ 0x0148, 0x665148 },
		{ 0x014c, 0x66514c },
		{ 0x0150, 0x665150 },
		{ 0x0154, 0x665154 },
		{ 0x0158, 0x665158 },
		{ 0x015c, 0x66515c },
		{ 0x0160, 0x665160 },
		{ 0x0164, 0x665164 },
		{ 0x0168, 0x665168 },
		{ 0x016c, 0x66516c },
		{ 0x0400, 0x665400 },
		{ 0x0408, 0x665408 },
		{ 0x040c, 0x66540c },
		{ 0x0410, 0x665410 },
		{}
	}
};
700
/* Overlay channel method groups (one global group per channel). */
static const struct nvkm_disp_chan_mthd
gf119_disp_ovly_mthd = {
	.name = "Overlay",
	.addr = 0x001000,
	.prev = -0x020000,
	.data = {
		{ "Global", 1, &gf119_disp_ovly_mthd_base },
		{}
	}
};
711
/* Overlay channel description (DMA, channel id 5). */
static const struct nvkm_disp_chan_user
gf119_disp_ovly = {
	.func = &gf119_disp_dmac_func,
	.ctrl = 5,
	.user = 5,
	.mthd = &gf119_disp_ovly_mthd,
};
719
/* Base channel global method list: EVO method -> 0x6610xx shadow regs. */
static const struct nvkm_disp_mthd_list
gf119_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x661080 },
		{ 0x0084, 0x661084 },
		{ 0x0088, 0x661088 },
		{ 0x008c, 0x66108c },
		{ 0x0090, 0x661090 },
		{ 0x0094, 0x661094 },
		{ 0x00a0, 0x6610a0 },
		{ 0x00a4, 0x6610a4 },
		{ 0x00c0, 0x6610c0 },
		{ 0x00c4, 0x6610c4 },
		{ 0x00c8, 0x6610c8 },
		{ 0x00cc, 0x6610cc },
		{ 0x00e0, 0x6610e0 },
		{ 0x00e4, 0x6610e4 },
		{ 0x00e8, 0x6610e8 },
		{ 0x00ec, 0x6610ec },
		{ 0x00fc, 0x6610fc },
		{ 0x0100, 0x661100 },
		{ 0x0104, 0x661104 },
		{ 0x0108, 0x661108 },
		{ 0x010c, 0x66110c },
		{ 0x0110, 0x661110 },
		{ 0x0114, 0x661114 },
		{ 0x0118, 0x661118 },
		{ 0x011c, 0x66111c },
		{ 0x0130, 0x661130 },
		{ 0x0134, 0x661134 },
		{ 0x0138, 0x661138 },
		{ 0x013c, 0x66113c },
		{ 0x0140, 0x661140 },
		{ 0x0144, 0x661144 },
		{ 0x0148, 0x661148 },
		{ 0x014c, 0x66114c },
		{ 0x0150, 0x661150 },
		{ 0x0154, 0x661154 },
		{ 0x0158, 0x661158 },
		{ 0x015c, 0x66115c },
		{ 0x0160, 0x661160 },
		{ 0x0164, 0x661164 },
		{ 0x0168, 0x661168 },
		{ 0x016c, 0x66116c },
		{}
	}
};
769
/* Base channel per-image method list (two image slots, 0x20 apart). */
static const struct nvkm_disp_mthd_list
gf119_disp_base_mthd_image = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0400, 0x661400 },
		{ 0x0404, 0x661404 },
		{ 0x0408, 0x661408 },
		{ 0x040c, 0x66140c },
		{ 0x0410, 0x661410 },
		{}
	}
};
783
/* Base channel method groups: one global group plus two image groups. */
const struct nvkm_disp_chan_mthd
gf119_disp_base_mthd = {
	.name = "Base",
	.addr = 0x001000,
	.prev = -0x020000,
	.data = {
		{ "Global", 1, &gf119_disp_base_mthd_base },
		{  "Image", 2, &gf119_disp_base_mthd_image },
		{}
	}
};
795
/* Base channel description (DMA, channel id 1). */
const struct nvkm_disp_chan_user
gf119_disp_base = {
	.func = &gf119_disp_dmac_func,
	.ctrl = 1,
	.user = 1,
	.mthd = &gf119_disp_base_mthd,
};
803
/* Core channel global method list. */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x660080 },
		{ 0x0084, 0x660084 },
		{ 0x0088, 0x660088 },
		{ 0x008c, 0x000000 },
		{}
	}
};
816
/* Core channel per-DAC method list (instances 0x20 apart). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_dac = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0180, 0x660180 },
		{ 0x0184, 0x660184 },
		{ 0x0188, 0x660188 },
		{ 0x0190, 0x660190 },
		{}
	}
};
829
/* Core channel per-SOR method list (instances 0x20 apart). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_sor = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0200, 0x660200 },
		{ 0x0204, 0x660204 },
		{ 0x0208, 0x660208 },
		{ 0x0210, 0x660210 },
		{}
	}
};
842
/* Core channel per-PIOR method list (instances 0x20 apart). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_pior = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0300, 0x660300 },
		{ 0x0304, 0x660304 },
		{ 0x0308, 0x660308 },
		{ 0x0310, 0x660310 },
		{}
	}
};
855
/* Core channel per-head method list (instances 0x300 apart). */
static const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_head = {
	.mthd = 0x0300,
	.addr = 0x000300,
	.data = {
		{ 0x0400, 0x660400 },
		{ 0x0404, 0x660404 },
		{ 0x0408, 0x660408 },
		{ 0x040c, 0x66040c },
		{ 0x0410, 0x660410 },
		{ 0x0414, 0x660414 },
		{ 0x0418, 0x660418 },
		{ 0x041c, 0x66041c },
		{ 0x0420, 0x660420 },
		{ 0x0424, 0x660424 },
		{ 0x0428, 0x660428 },
		{ 0x042c, 0x66042c },
		{ 0x0430, 0x660430 },
		{ 0x0434, 0x660434 },
		{ 0x0438, 0x660438 },
		{ 0x0440, 0x660440 },
		{ 0x0444, 0x660444 },
		{ 0x0448, 0x660448 },
		{ 0x044c, 0x66044c },
		{ 0x0450, 0x660450 },
		{ 0x0454, 0x660454 },
		{ 0x0458, 0x660458 },
		{ 0x045c, 0x66045c },
		{ 0x0460, 0x660460 },
		{ 0x0468, 0x660468 },
		{ 0x046c, 0x66046c },
		{ 0x0470, 0x660470 },
		{ 0x0474, 0x660474 },
		{ 0x0480, 0x660480 },
		{ 0x0484, 0x660484 },
		{ 0x048c, 0x66048c },
		{ 0x0490, 0x660490 },
		{ 0x0494, 0x660494 },
		{ 0x0498, 0x660498 },
		{ 0x04b0, 0x6604b0 },
		{ 0x04b8, 0x6604b8 },
		{ 0x04bc, 0x6604bc },
		{ 0x04c0, 0x6604c0 },
		{ 0x04c4, 0x6604c4 },
		{ 0x04c8, 0x6604c8 },
		{ 0x04d0, 0x6604d0 },
		{ 0x04d4, 0x6604d4 },
		{ 0x04e0, 0x6604e0 },
		{ 0x04e4, 0x6604e4 },
		{ 0x04e8, 0x6604e8 },
		{ 0x04ec, 0x6604ec },
		{ 0x04f0, 0x6604f0 },
		{ 0x04f4, 0x6604f4 },
		{ 0x04f8, 0x6604f8 },
		{ 0x04fc, 0x6604fc },
		{ 0x0500, 0x660500 },
		{ 0x0504, 0x660504 },
		{ 0x0508, 0x660508 },
		{ 0x050c, 0x66050c },
		{ 0x0510, 0x660510 },
		{ 0x0514, 0x660514 },
		{ 0x0518, 0x660518 },
		{ 0x051c, 0x66051c },
		{ 0x052c, 0x66052c },
		{ 0x0530, 0x660530 },
		{ 0x054c, 0x66054c },
		{ 0x0550, 0x660550 },
		{ 0x0554, 0x660554 },
		{ 0x0558, 0x660558 },
		{ 0x055c, 0x66055c },
		{}
	}
};
929
/* Core channel method groups: global plus per-OR and per-head groups. */
static const struct nvkm_disp_chan_mthd
gf119_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = -0x020000,
	.data = {
		{ "Global", 1, &gf119_disp_core_mthd_base },
		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
		{   "HEAD", 4, &gf119_disp_core_mthd_head },
		{}
	}
};
944
/* Shut down the core display channel (same flow as the generic DMA
 * fini, but at the fixed channel-0 register offsets). */
void
gf119_disp_core_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
			break;
	) < 0) {
		/* Timeout is non-fatal on teardown; just report the state. */
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610490));
	}

	/* Save PUT for restoration by gf119_disp_core_init() on resume. */
	chan->suspend_put = nvkm_rd32(device, 0x640000);
}
964
/* Bring up the core display channel: program pushbuffer, restore the
 * saved PUT pointer, enable, and wait (up to 2ms) for initialisation.
 * Returns 0 on success, -EBUSY on timeout. */
static int
gf119_disp_core_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610494, chan->push);
	nvkm_wr32(device, 0x610498, 0x00010000);
	nvkm_wr32(device, 0x61049c, 0x00000001);
	nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, chan->suspend_put);
	nvkm_wr32(device, 0x610490, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610490));
		return -EBUSY;
	}

	return 0;
}
991
/* Channel hooks for the core display channel. */
const struct nvkm_disp_chan_func
gf119_disp_core_func = {
	.push = nv50_disp_dmac_push,
	.init = gf119_disp_core_init,
	.fini = gf119_disp_core_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = gf119_disp_dmac_bind,
};
1001
/* Core channel description (DMA, channel id 0). */
static const struct nvkm_disp_chan_user
gf119_disp_core = {
	.func = &gf119_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &gf119_disp_core_mthd,
};
1009
/* Supervisor interrupt bottom-half.  Reads each head's supervisor status,
 * dispatches the three supervisor phases (1: shutdown, 2: reconfigure,
 * 3: restart) to the nv50 handlers, then clears the per-head status and
 * signals completion to the hardware. */
void
gf119_disp_super(struct work_struct *work)
{
	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	/* NOTE(review): fixed size assumes <= 4 heads on this generation;
	 * head ids index this array directly. */
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super.pending));
	mutex_lock(&disp->super.mutex);

	/* Snapshot per-head supervisor status before handling any phase. */
	list_for_each_entry(head, &disp->heads, head) {
		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super.pending & 0x00000001) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000002) {
		/* Phase 2 runs three sub-steps across all heads, in order. */
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000004) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Clear per-head status, then tell hardware we're done. */
	list_for_each_entry(head, &disp->heads, head)
		nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);

	nvkm_wr32(device, 0x6101d0, 0x80000000);
	mutex_unlock(&disp->super.mutex);
}
1067
/* Handle a display-channel error interrupt: decode and log the error
 * descriptor, dump channel methods for method-0x0080 errors, then
 * acknowledge and reset the error state. */
void
gf119_disp_intr_error(struct nvkm_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	/* Error descriptor: three consecutive u32s per channel. */
	u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12));
	u32 type = (stat & 0x00007000) >> 12;
	u32 mthd = (stat & 0x00000ffc);
	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
	u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
	const struct nvkm_enum *reason =
		nvkm_enum_find(nv50_disp_intr_error_type, type);

	nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
			   "data %08x code %08x\n",
		   chid, stat, type, reason ? reason->name : "",
		   mthd, data, code);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* Acknowledge the interrupt and reset the error descriptor. */
	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
}
1099
void
gf119_disp_intr(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 intr = nvkm_rd32(device, 0x610088);

	/* Channel event notifications: one bit per channel in 0x61008c.
	 * Send a uevent for each pending channel and ack each bit
	 * individually by writing it back.
	 */
	if (intr & 0x00000001) {
		u32 stat = nvkm_rd32(device, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(disp, chid);
			nvkm_wr32(device, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

	/* Channel exception: 0x61009c holds one bit per faulting channel.
	 * Only the lowest-numbered pending channel is handled here; the
	 * error handler acks the bit, so a remaining fault will re-raise.
	 */
	if (intr & 0x00000002) {
		u32 stat = nvkm_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			disp->func->intr_error(disp, chid);
		intr &= ~0x00000002;
	}

	/* Supervisor interrupt: record which of the three supervisor
	 * stages (bits 0-2 of 0x6100ac) are pending, hand processing off
	 * to the workqueue, and ack the bits.  Any other bits in 0x6100ac
	 * are unexpected; log and clear them.
	 */
	if (intr & 0x00100000) {
		u32 stat = nvkm_rd32(device, 0x6100ac);
		if (stat & 0x00000007) {
			disp->super.pending = (stat & 0x00000007);
			queue_work(disp->super.wq, &disp->super.work);
			nvkm_wr32(device, 0x6100ac, disp->super.pending);
			stat &= ~0x00000007;
		}

		if (stat) {
			nvkm_warn(subdev, "intr24 %08x\n", stat);
			nvkm_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	/* Per-head interrupts (bit 24 + head id).  Bit 0 of 0x6100bc
	 * signals vblank.  The zero-mask write presumably acks the head's
	 * status (NOTE(review): write-to-clear semantics assumed — confirm
	 * against hardware docs); the following read posts the write.
	 */
	list_for_each_entry(head, &disp->heads, head) {
		const u32 hoff = head->id * 0x800;
		u32 mask = 0x01000000 << head->id;
		if (mask & intr) {
			u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
			if (stat & 0x00000001)
				nvkm_disp_vblank(disp, head->id);
			nvkm_mask(device, 0x6100bc + hoff, 0, 0);
			nvkm_rd32(device, 0x6100c0 + hoff);
		}
	}
}
1155
/* Shut the display engine down: mask every interrupt source in 0x6100b0
 * (the same register gf119_disp_init programs to 0x307 to enable them).
 * The suspend flag is unused here; nothing else needs tearing down.
 */
void
gf119_disp_fini(struct nvkm_disp *disp, bool suspend)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x6100b0, 0x00000000);
}
1163
/* Bring the display engine up: mirror per-HEAD/DAC/SOR capability
 * registers into the locations EVO reads them from, take ownership of
 * the display away from the VBIOS, point the engine at its instance
 * memory, and enable supervisor interrupts.
 *
 * Returns 0 on success, or -EBUSY if the VBIOS hand-off times out.
 */
int
gf119_disp_init(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->heads, head) {
		const u32 hoff = head->id * 0x800;
		tmp = nvkm_rd32(device, 0x616104 + hoff);
		nvkm_wr32(device, 0x6101b4 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x616108 + hoff);
		nvkm_wr32(device, 0x6101b8 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x61610c + hoff);
		nvkm_wr32(device, 0x6101bc + hoff, tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
		nvkm_wr32(device, 0x6100ac, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* Poll (up to 2s) for bit 1 of 0x6194e8 to drop, which
		 * presumably indicates the VBIOS has released the display —
		 * TODO confirm exact semantics against hardware docs.
		 */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x610090, 0x00000000);
	nvkm_wr32(device, 0x6100a0, 0x00000000);
	nvkm_wr32(device, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some gk104 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	list_for_each_entry(head, &disp->heads, head) {
		const u32 hoff = head->id * 0x800;
		nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
	}

	return 0;
}
1232
/* GF119 (Fermi) display engine implementation table: lifecycle hooks,
 * interrupt handlers, output-resource constructors, and the user-visible
 * GF110_DISP object classes and their channel implementations.
 */
static const struct nvkm_disp_func
gf119_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = gf119_disp_init,
	.fini = gf119_disp_fini,
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.super = gf119_disp_super,
	.uevent = &gf119_disp_chan_uevent,
	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
	.sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
	.root = { 0,0,GF110_DISP },
	.user = {
		{{0,0,GF110_DISP_CURSOR             }, nvkm_disp_chan_new, &gf119_disp_curs },
		{{0,0,GF110_DISP_OVERLAY            }, nvkm_disp_chan_new, &gf119_disp_oimm },
		{{0,0,GF110_DISP_BASE_CHANNEL_DMA   }, nvkm_disp_chan_new, &gf119_disp_base },
		{{0,0,GF110_DISP_CORE_CHANNEL_DMA   }, nvkm_disp_core_new, &gf119_disp_core },
		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gf119_disp_ovly },
		{}
	},
};
1255
/* Construct a GF119 display engine instance backed by the gf119_disp
 * function table.  Returns 0 on success or a negative error code from
 * nvkm_disp_new_(), storing the new object in *pdisp.
 */
int
gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&gf119_disp, device, type, inst, pdisp);
}
1262