1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include "chan.h"
26#include "conn.h"
27#include "head.h"
28#include "dp.h"
29#include "ior.h"
30#include "outp.h"
31
32#include <core/client.h>
33#include <core/ramht.h>
34#include <subdev/bios.h>
35#include <subdev/bios/disp.h>
36#include <subdev/bios/init.h>
37#include <subdev/bios/pll.h>
38#include <subdev/devinit.h>
39#include <subdev/i2c.h>
40#include <subdev/mmu.h>
41#include <subdev/timer.h>
42
43#include <nvif/class.h>
44#include <nvif/unpack.h>
45
46static void
47nv50_pior_clock(struct nvkm_ior *pior)
48{
49	struct nvkm_device *device = pior->disp->engine.subdev.device;
50	const u32 poff = nv50_ior_base(pior);
51
52	nvkm_mask(device, 0x614380 + poff, 0x00000707, 0x00000001);
53}
54
55static int
56nv50_pior_dp_links(struct nvkm_ior *pior, struct nvkm_i2c_aux *aux)
57{
58	int ret = nvkm_i2c_aux_lnk_ctl(aux, pior->dp.nr, pior->dp.bw, pior->dp.ef);
59	if (ret)
60		return ret;
61
62	return 1;
63}
64
/* DisplayPort hooks for external (PIOR) encoders. */
static const struct nvkm_ior_func_dp
nv50_pior_dp = {
	.links = nv50_pior_dp_links,
};
69
/* Busy-wait (up to 2ms) for the PIOR power state-change bit to clear. */
static void
nv50_pior_power_wait(struct nvkm_device *device, u32 poff)
{
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61e004 + poff) & 0x80000000))
			break;
	);
}
78
/* Program the PIOR power state.  'normal' selects which half of the
 * register is updated (low bits vs bits 16+); only 'pu' is used here,
 * the remaining flags are part of the shared ->power() signature.
 */
static void
nv50_pior_power(struct nvkm_ior *pior, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
	struct nvkm_device *device = pior->disp->engine.subdev.device;
	const u32  poff = nv50_ior_base(pior);
	const u32 shift = normal ? 0 : 16;
	const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
	const u32 field = 0x80000000 | (0x00000101 << shift);

	/* Wait for any in-flight change, apply, then wait for completion. */
	nv50_pior_power_wait(device, poff);
	nvkm_mask(device, 0x61e004 + poff, field, state);
	nv50_pior_power_wait(device, poff);
}
92
93void
94nv50_pior_depth(struct nvkm_ior *ior, struct nvkm_ior_state *state, u32 ctrl)
95{
96	/* GF119 moves this information to per-head methods, which is
97	 * a lot more convenient, and where our shared code expect it.
98	 */
99	if (state->head && state == &ior->asy) {
100		struct nvkm_head *head = nvkm_head_find(ior->disp, __ffs(state->head));
101
102		if (!WARN_ON(!head)) {
103			struct nvkm_head_state *state = &head->asy;
104			switch ((ctrl & 0x000f0000) >> 16) {
105			case 6: state->or.depth = 30; break;
106			case 5: state->or.depth = 24; break;
107			case 2: state->or.depth = 18; break;
108			case 0: state->or.depth = 18; break; /*XXX*/
109			default:
110				state->or.depth = 18;
111				WARN_ON(1);
112				break;
113			}
114		}
115	}
116}
117
118static void
119nv50_pior_state(struct nvkm_ior *pior, struct nvkm_ior_state *state)
120{
121	struct nvkm_device *device = pior->disp->engine.subdev.device;
122	const u32 coff = pior->id * 8 + (state == &pior->arm) * 4;
123	u32 ctrl = nvkm_rd32(device, 0x610b80 + coff);
124
125	state->proto_evo = (ctrl & 0x00000f00) >> 8;
126	state->rgdiv = 1;
127	switch (state->proto_evo) {
128	case 0: state->proto = TMDS; break;
129	default:
130		state->proto = UNKNOWN;
131		break;
132	}
133
134	state->head = ctrl & 0x00000003;
135	nv50_pior_depth(pior, state, ctrl);
136}
137
/* OR function table for NV50 external (PIOR) encoders. */
static const struct nvkm_ior_func
nv50_pior = {
	.state = nv50_pior_state,
	.power = nv50_pior_power,
	.clock = nv50_pior_clock,
	.dp = &nv50_pior_dp,
};
145
/* Construct PIOR 'id' for the display engine. */
int
nv50_pior_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&nv50_pior, disp, PIOR, id, false);
}
151
152int
153nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
154{
155	struct nvkm_device *device = disp->engine.subdev.device;
156
157	*pmask = (nvkm_rd32(device, 0x610184) & 0x70000000) >> 28;
158	return 3;
159}
160
161static int
162nv50_sor_bl_set(struct nvkm_ior *ior, int lvl)
163{
164	struct nvkm_device *device = ior->disp->engine.subdev.device;
165	const u32 soff = nv50_ior_base(ior);
166	u32 div = 1025;
167	u32 val = (lvl * div) / 100;
168
169	nvkm_wr32(device, 0x61c084 + soff, 0x80000000 | val);
170	return 0;
171}
172
173static int
174nv50_sor_bl_get(struct nvkm_ior *ior)
175{
176	struct nvkm_device *device = ior->disp->engine.subdev.device;
177	const u32 soff = nv50_ior_base(ior);
178	u32 div = 1025;
179	u32 val;
180
181	val  = nvkm_rd32(device, 0x61c084 + soff);
182	val &= 0x000007ff;
183	return ((val * 100) + (div / 2)) / div;
184}
185
/* Backlight control hooks shared by NV50-family SORs. */
const struct nvkm_ior_func_bl
nv50_sor_bl = {
	.get = nv50_sor_bl_get,
	.set = nv50_sor_bl_set,
};
191
192void
193nv50_sor_clock(struct nvkm_ior *sor)
194{
195	struct nvkm_device *device = sor->disp->engine.subdev.device;
196	const int  div = sor->asy.link == 3;
197	const u32 soff = nv50_ior_base(sor);
198
199	nvkm_mask(device, 0x614300 + soff, 0x00000707, (div << 8) | div);
200}
201
/* Busy-wait (up to 2ms) for the SOR power state-change bit to clear. */
static void
nv50_sor_power_wait(struct nvkm_device *device, u32 soff)
{
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
			break;
	);
}
210
/* Program the SOR power state.  'normal' selects which half of the
 * register is updated (low bits vs bits 16+); only 'pu' is used here,
 * the remaining flags are part of the shared ->power() signature.
 */
void
nv50_sor_power(struct nvkm_ior *sor, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32  soff = nv50_ior_base(sor);
	const u32 shift = normal ? 0 : 16;
	const u32 state = 0x80000000 | (0x00000001 * !!pu) << shift;
	const u32 field = 0x80000000 | (0x00000001 << shift);

	nv50_sor_power_wait(device, soff);
	nvkm_mask(device, 0x61c004 + soff, field, state);
	nv50_sor_power_wait(device, soff);

	/* Additionally wait for 0x61c030 bit 28 to clear before returning. */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
			break;
	);
}
229
230void
231nv50_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
232{
233	struct nvkm_device *device = sor->disp->engine.subdev.device;
234	const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
235	u32 ctrl = nvkm_rd32(device, 0x610b70 + coff);
236
237	state->proto_evo = (ctrl & 0x00000f00) >> 8;
238	switch (state->proto_evo) {
239	case 0: state->proto = LVDS; state->link = 1; break;
240	case 1: state->proto = TMDS; state->link = 1; break;
241	case 2: state->proto = TMDS; state->link = 2; break;
242	case 5: state->proto = TMDS; state->link = 3; break;
243	default:
244		state->proto = UNKNOWN;
245		break;
246	}
247
248	state->head = ctrl & 0x00000003;
249}
250
/* OR function table for NV50 SOR encoders. */
static const struct nvkm_ior_func
nv50_sor = {
	.state = nv50_sor_state,
	.power = nv50_sor_power,
	.clock = nv50_sor_clock,
	.bl = &nv50_sor_bl,
};
258
/* Construct SOR 'id' for the display engine. */
static int
nv50_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&nv50_sor, disp, SOR, id, false);
}
264
265int
266nv50_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
267{
268	struct nvkm_device *device = disp->engine.subdev.device;
269
270	*pmask = (nvkm_rd32(device, 0x610184) & 0x03000000) >> 24;
271	return 2;
272}
273
274static void
275nv50_dac_clock(struct nvkm_ior *dac)
276{
277	struct nvkm_device *device = dac->disp->engine.subdev.device;
278	const u32 doff = nv50_ior_base(dac);
279
280	nvkm_mask(device, 0x614280 + doff, 0x07070707, 0x00000000);
281}
282
/* Perform DAC load detection with the given load value.  Returns the
 * 3-bit sense result, or -ETIMEDOUT if the hardware never reported a
 * valid result (bit 31 clear).
 */
int
nv50_dac_sense(struct nvkm_ior *dac, u32 loadval)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	const u32 doff = nv50_ior_base(dac);

	/* Power up the DAC's safe state for the measurement. */
	dac->func->power(dac, false, true, false, false, false);

	nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
	/* Allow the load-detect circuit to settle before sampling. */
	mdelay(9);
	udelay(500);
	/* Read the result and clear the register in one operation. */
	loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);

	/* Power the DAC back down before reporting. */
	dac->func->power(dac, false, false, false, false, false);
	if (!(loadval & 0x80000000))
		return -ETIMEDOUT;

	return (loadval & 0x38000000) >> 27;
}
302
/* Busy-wait (up to 2ms) for the DAC power state-change bit to clear. */
static void
nv50_dac_power_wait(struct nvkm_device *device, const u32 doff)
{
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
			break;
	);
}
311
/* Program the DAC power state.  Unlike the SOR/PIOR variants, the DAC
 * register encodes separate power-down bits for the output, data, and
 * each sync signal; the bits are active when the input flag is false.
 */
void
nv50_dac_power(struct nvkm_ior *dac, bool normal, bool pu, bool data, bool vsync, bool hsync)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	const u32  doff = nv50_ior_base(dac);
	/* 'normal' selects the low half of the register, safe state bits 16+. */
	const u32 shift = normal ? 0 : 16;
	const u32 state = 0x80000000 | (0x00000040 * !    pu |
					0x00000010 * !  data |
					0x00000004 * ! vsync |
					0x00000001 * ! hsync) << shift;
	const u32 field = 0xc0000000 | (0x00000055 << shift);

	nv50_dac_power_wait(device, doff);
	nvkm_mask(device, 0x61a004 + doff, field, state);
	nv50_dac_power_wait(device, doff);
}
328
329static void
330nv50_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
331{
332	struct nvkm_device *device = dac->disp->engine.subdev.device;
333	const u32 coff = dac->id * 8 + (state == &dac->arm) * 4;
334	u32 ctrl = nvkm_rd32(device, 0x610b58 + coff);
335
336	state->proto_evo = (ctrl & 0x00000f00) >> 8;
337	switch (state->proto_evo) {
338	case 0: state->proto = CRT; break;
339	default:
340		state->proto = UNKNOWN;
341		break;
342	}
343
344	state->head = ctrl & 0x00000003;
345}
346
/* OR function table for NV50 DAC encoders. */
static const struct nvkm_ior_func
nv50_dac = {
	.state = nv50_dac_state,
	.power = nv50_dac_power,
	.sense = nv50_dac_sense,
	.clock = nv50_dac_clock,
};
354
/* Construct DAC 'id' for the display engine. */
int
nv50_dac_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&nv50_dac, disp, DAC, id, false);
}
360
361int
362nv50_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
363{
364	struct nvkm_device *device = disp->engine.subdev.device;
365
366	*pmask = (nvkm_rd32(device, 0x610184) & 0x00700000) >> 20;
367	return 3;
368}
369
370static void
371nv50_head_vblank_put(struct nvkm_head *head)
372{
373	struct nvkm_device *device = head->disp->engine.subdev.device;
374
375	nvkm_mask(device, 0x61002c, (4 << head->id), 0);
376}
377
378static void
379nv50_head_vblank_get(struct nvkm_head *head)
380{
381	struct nvkm_device *device = head->disp->engine.subdev.device;
382
383	nvkm_mask(device, 0x61002c, (4 << head->id), (4 << head->id));
384}
385
386static void
387nv50_head_rgclk(struct nvkm_head *head, int div)
388{
389	struct nvkm_device *device = head->disp->engine.subdev.device;
390
391	nvkm_mask(device, 0x614200 + (head->id * 0x800), 0x0000000f, div);
392}
393
/* Sample the raster generator's current scanout position for 'head'.
 * Read order matters: the vline read latches hline, so the pair is
 * returned as a coherent snapshot.
 */
void
nv50_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	const u32 hoff = head->id * 0x800;

	/* vline read locks hline. */
	*vline = nvkm_rd32(device, 0x616340 + hoff) & 0x0000ffff;
	*hline = nvkm_rd32(device, 0x616344 + hoff) & 0x0000ffff;
}
404
/* Read back a head's mode timings from the EVO shadow state.  Each
 * register packs v<field> in the high 16 bits and h<field> in the low
 * 16; the (state == &head->arm) * 4 offset selects the armed copy.
 */
static void
nv50_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	const u32 hoff = head->id * 0x540 + (state == &head->arm) * 4;
	u32 data;

	data = nvkm_rd32(device, 0x610ae8 + hoff);
	state->vblanke = (data & 0xffff0000) >> 16;
	state->hblanke = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x610af0 + hoff);
	state->vblanks = (data & 0xffff0000) >> 16;
	state->hblanks = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x610af8 + hoff);
	state->vtotal = (data & 0xffff0000) >> 16;
	state->htotal = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x610b00 + hoff);
	state->vsynce = (data & 0xffff0000) >> 16;
	state->hsynce = (data & 0x0000ffff);
	/* Pixel clock is stored in kHz; convert to Hz. */
	state->hz = (nvkm_rd32(device, 0x610ad0 + hoff) & 0x003fffff) * 1000;
}
426
/* Head function table for NV50. */
static const struct nvkm_head_func
nv50_head = {
	.state = nv50_head_state,
	.rgpos = nv50_head_rgpos,
	.rgclk = nv50_head_rgclk,
	.vblank_get = nv50_head_vblank_get,
	.vblank_put = nv50_head_vblank_put,
};
435
/* Construct head 'id' for the display engine. */
int
nv50_head_new(struct nvkm_disp *disp, int id)
{
	return nvkm_head_new_(&nv50_head, disp, id);
}
441
/* NV50 always exposes two heads; report both present. */
int
nv50_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	*pmask = 0x3;
	return 2;
}
448
449
/* Dump one EVO method list for debugging.  For each method, the shadow
 * copy at offset 0 ('next') and the copy at offset 'c' ('prev') are
 * read; a pending update is shown as "prev -> next".
 */
static void
nv50_disp_mthd_list(struct nvkm_disp *disp, int debug, u32 base, int c,
		    const struct nvkm_disp_mthd_list *list, int inst)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	for (i = 0; list->data[i].mthd; i++) {
		if (list->data[i].addr) {
			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
			/* Instanced lists offset the reported method number. */
			u32 mthd = list->data[i].mthd + (list->mthd * inst);
			const char *name = list->data[i].name;
			char mods[16];

			if (prev != next)
				snprintf(mods, sizeof(mods), "-> %08x", next);
			else
				snprintf(mods, sizeof(mods), "%13c", ' ');

			nvkm_printk_(subdev, debug, info,
				     "\t%04x: %08x %s%s%s\n",
				     mthd, prev, mods, name ? " // " : "",
				     name ? name : "");
		}
	}
}
478
/* Dump all method state for a display channel at the given debug level.
 * No-op when the subdev debug level is below 'debug' or the channel has
 * no method tables attached.
 */
void
nv50_disp_chan_mthd(struct nvkm_disp_chan *chan, int debug)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	const struct nvkm_disp_chan_mthd *mthd = chan->mthd;
	const struct nvkm_disp_mthd_list *list;
	int i, j;

	if (debug > subdev->debug)
		return;
	if (!mthd)
		return;

	/* Walk each method list, once per instance (data[i].nr). */
	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
		u32 base = chan->head * mthd->addr;
		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
			const char *cname = mthd->name;
			const char *sname = "";
			char cname_[16], sname_[16];

			/* Per-channel tables get the channel id appended. */
			if (mthd->addr) {
				snprintf(cname_, sizeof(cname_), "%s %d",
					 mthd->name, chan->chid.user);
				cname = cname_;
			}

			/* Multi-instance lists get the instance appended. */
			if (mthd->data[i].nr > 1) {
				snprintf(sname_, sizeof(sname_), " - %s %d",
					 mthd->data[i].name, j);
				sname = sname_;
			}

			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
					    list, j);
		}
	}
}
518
519static void
520nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
521{
522	struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
523	struct nvkm_device *device = disp->engine.subdev.device;
524	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
525	nvkm_wr32(device, 0x610020, 0x00000001 << index);
526}
527
528static void
529nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
530{
531	struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
532	struct nvkm_device *device = disp->engine.subdev.device;
533	nvkm_wr32(device, 0x610020, 0x00000001 << index);
534	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
535}
536
/* Notify listeners that channel 'chid' has awoken. */
void
nv50_disp_chan_uevent_send(struct nvkm_disp *disp, int chid)
{
	nvkm_event_ntfy(&disp->uevent, chid, NVKM_DISP_EVENT_CHAN_AWAKEN);
}
542
/* Event hooks for channel-awaken notifications. */
const struct nvkm_event_func
nv50_disp_chan_uevent = {
	.init = nv50_disp_chan_uevent_init,
	.fini = nv50_disp_chan_uevent_fini,
};
548
549u64
550nv50_disp_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
551{
552	*psize = 0x1000;
553	return 0x640000 + (chan->chid.user * 0x1000);
554}
555
556void
557nv50_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
558{
559	struct nvkm_device *device = chan->disp->engine.subdev.device;
560	const u32 mask = 0x00010001 << chan->chid.user;
561	const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
562	nvkm_mask(device, 0x610028, mask, data);
563}
564
/* Tear down a PIO display channel: clear the enable bit, then wait (up
 * to 2ms) for the channel state field to report idle, logging on timeout.
 */
static void
nv50_disp_pioc_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout: %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
	}
}
583
/* Bring up a PIO display channel: reset it (0x2000), wait for the state
 * field to clear, enable it, then wait for it to report the running
 * state (0x00010000).  Returns -EBUSY on either timeout.
 */
static int
nv50_disp_pioc_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
		return -EBUSY;
	}

	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
		if ((tmp & 0x00030000) == 0x00010000)
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
616
/* Channel hooks for PIO (non-DMA) display channels. */
const struct nvkm_disp_chan_func
nv50_disp_pioc_func = {
	.init = nv50_disp_pioc_init,
	.fini = nv50_disp_pioc_fini,
	.intr = nv50_disp_chan_intr,
	.user = nv50_disp_chan_user,
};
624
/* Bind a DMA object into the display RAMHT so channel methods can
 * reference it by handle.
 */
int
nv50_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
{
	return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -10, handle,
				 chan->chid.user << 28 | chan->chid.user);
}
631
/* Tear down a DMA display channel and save its put pointer so it can be
 * restored on resume.
 */
static void
nv50_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* deactivate channel */
	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
	}

	/* Remember the current put pointer for suspend/resume. */
	chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
653
/* Bring up a DMA display channel: program the push buffer, restore the
 * saved put pointer, enable the channel, and wait for it to settle.
 * Returns -EBUSY if the channel never leaves its busy state.
 */
static int
nv50_disp_dmac_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
	nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
	nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
682
/* Resolve a client memory handle into the channel's push buffer and
 * encode its address for the hardware.  Returns 0 on success, -EINVAL
 * for undersized buffers or unsupported memory targets.
 */
int
nv50_disp_dmac_push(struct nvkm_disp_chan *chan, u64 object)
{
	chan->memory = nvkm_umem_search(chan->object.client, object);
	if (IS_ERR(chan->memory))
		return PTR_ERR(chan->memory);

	/* Push buffers smaller than 4KiB are rejected. */
	if (nvkm_memory_size(chan->memory) < 0x1000)
		return -EINVAL;

	/* The memory target type is encoded in the low bits... */
	switch (nvkm_memory_target(chan->memory)) {
	case NVKM_MEM_TARGET_VRAM: chan->push = 0x00000001; break;
	case NVKM_MEM_TARGET_NCOH: chan->push = 0x00000002; break;
	case NVKM_MEM_TARGET_HOST: chan->push = 0x00000003; break;
	default:
		return -EINVAL;
	}

	/* ...with the address stored above them in 256-byte units. */
	chan->push |= nvkm_memory_addr(chan->memory) >> 8;
	return 0;
}
704
/* Channel hooks for DMA display channels (base/overlay). */
const struct nvkm_disp_chan_func
nv50_disp_dmac_func = {
	.push = nv50_disp_dmac_push,
	.init = nv50_disp_dmac_init,
	.fini = nv50_disp_dmac_fini,
	.intr = nv50_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = nv50_disp_dmac_bind,
};
714
/* Cursor channel: PIO, channel id 7. */
const struct nvkm_disp_chan_user
nv50_disp_curs = {
	.func = &nv50_disp_pioc_func,
	.ctrl = 7,
	.user = 7,
};
721
/* Overlay-immediate channel: PIO, channel id 5. */
const struct nvkm_disp_chan_user
nv50_disp_oimm = {
	.func = &nv50_disp_pioc_func,
	.ctrl = 5,
	.user = 5,
};
728
/* Overlay channel method -> debug register mapping (method, register). */
static const struct nvkm_disp_mthd_list
nv50_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0009a0 },
		{ 0x0088, 0x0009c0 },
		{ 0x008c, 0x0009c8 },
		{ 0x0090, 0x6109b4 },
		{ 0x0094, 0x610970 },
		{ 0x00a0, 0x610998 },
		{ 0x00a4, 0x610964 },
		{ 0x00c0, 0x610958 },
		{ 0x00e0, 0x6109a8 },
		{ 0x00e4, 0x6109d0 },
		{ 0x00e8, 0x6109d8 },
		{ 0x0100, 0x61094c },
		{ 0x0104, 0x610984 },
		{ 0x0108, 0x61098c },
		{ 0x0800, 0x6109f8 },
		{ 0x0808, 0x610a08 },
		{ 0x080c, 0x610a10 },
		{ 0x0810, 0x610a00 },
		{}
	}
};
756
/* Method-dump description for the overlay channel. */
static const struct nvkm_disp_chan_mthd
nv50_disp_ovly_mthd = {
	.name = "Overlay",
	.addr = 0x000540,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_ovly_mthd_base },
		{}
	}
};
767
/* Overlay channel: DMA, channel id 3. */
static const struct nvkm_disp_chan_user
nv50_disp_ovly = {
	.func = &nv50_disp_dmac_func,
	.ctrl = 3,
	.user = 3,
	.mthd = &nv50_disp_ovly_mthd,
};
775
/* Base channel global method -> debug register mapping. */
static const struct nvkm_disp_mthd_list
nv50_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x0008c4 },
		{ 0x0088, 0x0008d0 },
		{ 0x008c, 0x0008dc },
		{ 0x0090, 0x0008e4 },
		{ 0x0094, 0x610884 },
		{ 0x00a0, 0x6108a0 },
		{ 0x00a4, 0x610878 },
		{ 0x00c0, 0x61086c },
		{ 0x00e0, 0x610858 },
		{ 0x00e4, 0x610860 },
		{ 0x00e8, 0x6108ac },
		{ 0x00ec, 0x6108b4 },
		{ 0x0100, 0x610894 },
		{ 0x0110, 0x6108bc },
		{ 0x0114, 0x61088c },
		{}
	}
};
800
/* Base channel per-image method -> debug register mapping. */
const struct nvkm_disp_mthd_list
nv50_disp_base_mthd_image = {
	.mthd = 0x0400,
	.addr = 0x000000,
	.data = {
		{ 0x0800, 0x6108f0 },
		{ 0x0804, 0x6108fc },
		{ 0x0808, 0x61090c },
		{ 0x080c, 0x610914 },
		{ 0x0810, 0x610904 },
		{}
	}
};
814
/* Method-dump description for the base channel. */
static const struct nvkm_disp_chan_mthd
nv50_disp_base_mthd = {
	.name = "Base",
	.addr = 0x000540,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_base_mthd_base },
		{  "Image", 2, &nv50_disp_base_mthd_image },
		{}
	}
};
826
/* Base channel: DMA, channel id 1. */
static const struct nvkm_disp_chan_user
nv50_disp_base = {
	.func = &nv50_disp_dmac_func,
	.ctrl = 1,
	.user = 1,
	.mthd = &nv50_disp_base_mthd,
};
834
/* Core channel global method -> debug register mapping. */
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x000000 },
		{ 0x0084, 0x610bb8 },
		{ 0x0088, 0x610b9c },
		{ 0x008c, 0x000000 },
		{}
	}
};
847
/* Core channel per-DAC method -> debug register mapping. */
static const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_dac = {
	.mthd = 0x0080,
	.addr = 0x000008,
	.data = {
		{ 0x0400, 0x610b58 },
		{ 0x0404, 0x610bdc },
		{ 0x0420, 0x610828 },
		{}
	}
};
859
/* Core channel per-SOR method -> debug register mapping. */
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_sor = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0600, 0x610b70 },
		{}
	}
};
869
/* Core channel per-PIOR method -> debug register mapping. */
const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_pior = {
	.mthd = 0x0040,
	.addr = 0x000008,
	.data = {
		{ 0x0700, 0x610b80 },
		{}
	}
};
879
/* Core channel per-head method -> debug register mapping. */
static const struct nvkm_disp_mthd_list
nv50_disp_core_mthd_head = {
	.mthd = 0x0400,
	.addr = 0x000540,
	.data = {
		{ 0x0800, 0x610ad8 },
		{ 0x0804, 0x610ad0 },
		{ 0x0808, 0x610a48 },
		{ 0x080c, 0x610a78 },
		{ 0x0810, 0x610ac0 },
		{ 0x0814, 0x610af8 },
		{ 0x0818, 0x610b00 },
		{ 0x081c, 0x610ae8 },
		{ 0x0820, 0x610af0 },
		{ 0x0824, 0x610b08 },
		{ 0x0828, 0x610b10 },
		{ 0x082c, 0x610a68 },
		{ 0x0830, 0x610a60 },
		{ 0x0834, 0x000000 },
		{ 0x0838, 0x610a40 },
		{ 0x0840, 0x610a24 },
		{ 0x0844, 0x610a2c },
		{ 0x0848, 0x610aa8 },
		{ 0x084c, 0x610ab0 },
		{ 0x0860, 0x610a84 },
		{ 0x0864, 0x610a90 },
		{ 0x0868, 0x610b18 },
		{ 0x086c, 0x610b20 },
		{ 0x0870, 0x610ac8 },
		{ 0x0874, 0x610a38 },
		{ 0x0880, 0x610a58 },
		{ 0x0884, 0x610a9c },
		{ 0x08a0, 0x610a70 },
		{ 0x08a4, 0x610a50 },
		{ 0x08a8, 0x610ae0 },
		{ 0x08c0, 0x610b28 },
		{ 0x08c4, 0x610b30 },
		{ 0x08c8, 0x610b40 },
		{ 0x08d4, 0x610b38 },
		{ 0x08d8, 0x610b48 },
		{ 0x08dc, 0x610b50 },
		{ 0x0900, 0x610a18 },
		{ 0x0904, 0x610ab8 },
		{}
	}
};
926
/* Method-dump description for the core channel, covering the global
 * list plus one instance per DAC, SOR, PIOR and head.
 */
static const struct nvkm_disp_chan_mthd
nv50_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = 0x000004,
	.data = {
		{ "Global", 1, &nv50_disp_core_mthd_base },
		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
		{   "HEAD", 2, &nv50_disp_core_mthd_head },
		{}
	}
};
941
/* Tear down the core channel and save its put pointer for resume. */
static void
nv50_disp_core_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610200));
	}

	/* Remember the current put pointer for suspend/resume. */
	chan->suspend_put = nvkm_rd32(device, 0x640000);
}
961
/* Bring up the core channel: recover from known-stuck states, program
 * the push buffer, restore the saved put pointer, then enable and wait
 * for the channel to settle.  Returns -EBUSY on timeout.
 */
static int
nv50_disp_core_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* attempt to unstick channel from some unknown state */
	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610204, chan->push);
	nvkm_wr32(device, 0x610208, 0x00010000);
	nvkm_wr32(device, 0x61020c, 0x00000000);
	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, chan->suspend_put);
	nvkm_wr32(device, 0x610200, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610200));
		return -EBUSY;
	}

	return 0;
}
994
/* Channel hooks for the core (channel 0) DMA channel. */
const struct nvkm_disp_chan_func
nv50_disp_core_func = {
	.push = nv50_disp_dmac_push,
	.init = nv50_disp_core_init,
	.fini = nv50_disp_core_fini,
	.intr = nv50_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = nv50_disp_dmac_bind,
};
1004
/* Core channel: DMA, channel id 0. */
static const struct nvkm_disp_chan_user
nv50_disp_core = {
	.func = &nv50_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &nv50_disp_core_mthd,
};
1012
/* Look up the VBIOS Init-Each-Device Table entry for (head, outp).
 * Returns the table pointer (0 if not found), filling in version/
 * header/count/length and the parsed entry via the out parameters.
 */
static u32
nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
		     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		     struct nvbios_outp *iedt)
{
	struct nvkm_bios *bios = head->disp->engine.subdev.device->bios;
	const u8  l = ffs(outp->info.link);
	const u16 t = outp->info.hasht;
	/* Match mask combines head bit, link index and OR number. */
	const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or;
	u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
	if (!data)
		OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
	return data;
}
1027
/* Execute the VBIOS "on" init script (clkcmp[id]) for the output being
 * attached to 'head', matched against the current protocol/flags and
 * pixel clock.  Silently returns if no output is attached or the
 * relevant VBIOS tables are missing.
 */
static void
nv50_disp_super_ied_on(struct nvkm_head *head,
		       struct nvkm_ior *ior, int id, u32 khz)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp = ior->asy.outp;
	struct nvbios_ocfg iedtrs;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len, flags = 0x00;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing to attach");
		return;
	}

	/* Lookup IED table for the device. */
	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Lookup IEDT runtime settings for the current configuration. */
	if (ior->type == SOR) {
		/* Flag 0x02: 24bpp LVDS; flag 0x01: dual-link. */
		if (ior->asy.proto == LVDS) {
			if (head->asy.or.depth == 24)
				flags |= 0x02;
		}
		if (ior->asy.link == 3)
			flags |= 0x01;
	}

	data = nvbios_ocfg_match(bios, data, ior->asy.proto_evo, flags,
				 &ver, &hdr, &cnt, &len, &iedtrs);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RS for %02x:%02x",
			 ior->asy.proto_evo, flags);
		return;
	}

	/* Execute the OnInt[23] script for the current frequency. */
	data = nvbios_oclk_match(bios, iedtrs.clkcmp[id], khz);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RSS %d for %02x:%02x %d khz",
			 id, ior->asy.proto_evo, flags, khz);
		return;
	}

	nvbios_init(subdev, data,
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->asy.link;
		init.head = head->id;
	);
}
1083
/* Execute the VBIOS "off" init script (script[id]) for the output
 * currently attached to 'head'.  Silently returns if nothing is
 * attached or the IEDT entry is missing.
 */
static void
nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id)
{
	struct nvkm_outp *outp = ior->arm.outp;
	struct nvbios_outp iedt;
	u8  ver, hdr, cnt, len;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing attached");
		return;
	}

	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	nvbios_init(&head->disp->engine.subdev, iedt.script[id],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
		init.head = head->id;
	);
}
1108
/* Find the OR whose assembly state targets 'head', i.e. the OR about to
 * be attached; NULL if no OR is being attached to this head.
 */
static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head *head)
{
	struct nvkm_ior *ior;
	list_for_each_entry(ior, &head->disp->iors, head) {
		if (ior->asy.head & (1 << head->id)) {
			HEAD_DBG(head, "to %s", ior->name);
			return ior;
		}
	}
	HEAD_DBG(head, "nothing to attach");
	return NULL;
}
1122
/* Find the OR whose armed state drives 'head', i.e. the OR currently
 * attached; NULL if nothing is attached to this head.
 */
static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head *head)
{
	struct nvkm_ior *ior;
	list_for_each_entry(ior, &head->disp->iors, head) {
		if (ior->arm.head & (1 << head->id)) {
			HEAD_DBG(head, "on %s", ior->name);
			return ior;
		}
	}
	HEAD_DBG(head, "nothing attached");
	return NULL;
}
1136
1137void
1138nv50_disp_super_3_0(struct nvkm_disp *disp, struct nvkm_head *head)
1139{
1140	struct nvkm_ior *ior;
1141
1142	/* Determine which OR, if any, we're attaching to the head. */
1143	HEAD_DBG(head, "supervisor 3.0");
1144	ior = nv50_disp_super_ior_asy(head);
1145	if (!ior)
1146		return;
1147
1148	/* Execute OnInt3 IED script. */
1149	nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000);
1150
1151	/* OR-specific handling. */
1152	if (ior->func->war_3)
1153		ior->func->war_3(ior);
1154}
1155
static void
nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
{
	/* Compute and program DP timing parameters for the head's new mode:
	 * audio symbols per h/vblank, and (where supported) the TU/VTU
	 * "active symbol" parameters plus the watermark value.
	 */
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	const u32      khz = head->asy.hz / 1000;
	const u32 linkKBps = ior->dp.bw * 27000;	/* dp.bw in 270MHz units */
	const u32   symbol = 100000;	/* fixed-point scale for link_ratio */
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u64 h, v;

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7;
	h = h * linkKBps;
	do_div(h, khz);
	h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	v = head->asy.vblanks - head->asy.vblanke - 25;
	v = v * linkKBps;
	do_div(v, khz);
	v = v - ((36 / ior->dp.nr) + 3) - 1;

	ior->func->dp->audio_sym(ior, head->id, h, v);

	/* watermark / activesym */
	link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;

	/* calculate ratio of packed data rate to link symbol rate,
	 * scaled by 'symbol' to keep fractional precision in integers */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, linkKBps);

	/* Search TU sizes 64..32 for the hw fraction (VTUi + VTUf/VTUa)
	 * closest to the ideal number of valid symbols per TU.  Loop is
	 * skipped entirely when the IOR has no activesym method.
	 */
	for (TU = 64; ior->func->dp->activesym && TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				if (VTUf <= 15) {
					VTUa  = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					/* fraction too coarse; round up a
					 * whole symbol instead */
					VTUa  = 0;
					VTUf  = 1;
					calc += symbol;
				}
			} else {
				VTUa  = 0;
				VTUf  = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero.  decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		/* keep the best (smallest error) representation found */
		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			if (diff == 0)
				break;
		}
	}

	if (ior->func->dp->activesym) {
		if (!bestTU) {
			nvkm_error(subdev, "unable to determine dp config\n");
			return;
		}

		ior->func->dp->activesym(ior, head->id, bestTU, bestVTUa, bestVTUf, bestVTUi);
	} else {
		/* no activesym control; watermark below assumes a full TU */
		bestTU = 64;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	ior->func->dp->watermark(ior, head->id, unk);
}
1261
void
nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head)
{
	/* Supervisor 2.2: attach an OR to the head - run the OnInt2 IED
	 * script, program the RG clock divider, do DP-specific setup, and
	 * finally program the OR's clock.
	 */
	const u32 khz = head->asy.hz / 1000;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching from the head. */
	HEAD_DBG(head, "supervisor 2.2");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	outp = ior->asy.outp;

	/* For some reason, NVIDIA decided not to:
	 *
	 * A) Give dual-link LVDS a separate EVO protocol, like for TMDS.
	 *  and
	 * B) Use SetControlOutputResource.PixelDepth on LVDS.
	 *
	 * Override the values we usually read from HW with the same
	 * data we pass though an ioctl instead.
	 */
	if (outp && ior->type == SOR && ior->asy.proto == LVDS) {
		head->asy.or.depth = outp->lvds.bpc8 ? 24 : 18;
		ior->asy.link      = outp->lvds.dual ? 3 : 1;
	}

	/* Execute OnInt2 IED script. */
	nv50_disp_super_ied_on(head, ior, 0, khz);

	/* Program RG clock divider. */
	head->func->rgclk(head, ior->asy.rgdiv);

	/* Mode-specific internal DP configuration. */
	if (ior->type == SOR && ior->asy.proto == DP)
		nv50_disp_super_2_2_dp(head, ior);

	/* OR-specific handling. */
	ior->func->clock(ior);
	if (ior->func->war_2)
		ior->func->war_2(ior);
}
1306
1307void
1308nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head)
1309{
1310	struct nvkm_devinit *devinit = disp->engine.subdev.device->devinit;
1311	const u32 khz = head->asy.hz / 1000;
1312	HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
1313	if (khz)
1314		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz);
1315}
1316
void
nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	HEAD_DBG(head, "supervisor 2.0");

	/* Determine which OR, if any, we're detaching from the head,
	 * and run its OffInt2 IED script if one is attached.
	 */
	ior = nv50_disp_super_ior_arm(head);
	if (ior)
		nv50_disp_super_ied_off(head, ior, 2);
}
1331
void
nv50_disp_super_1_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	HEAD_DBG(head, "supervisor 1.0");

	/* Determine which OR, if any, we're detaching from the head,
	 * and run its OffInt1 IED script if one is attached.
	 */
	ior = nv50_disp_super_ior_arm(head);
	if (ior)
		nv50_disp_super_ied_off(head, ior, 1);
}
1346
void
nv50_disp_super_1(struct nvkm_disp *disp)
{
	/* Snapshot current (arm) and pending (asy) hardware state for every
	 * head and OR, so the later supervisor stages can compare them.
	 */
	struct nvkm_head *head;
	struct nvkm_ior *ior;

	list_for_each_entry(head, &disp->heads, head) {
		head->func->state(head, &head->arm);
		head->func->state(head, &head->asy);
	}

	list_for_each_entry(ior, &disp->iors, head) {
		ior->func->state(ior, &ior->arm);
		ior->func->state(ior, &ior->asy);
	}
}
1363
void
nv50_disp_super(struct work_struct *work)
{
	/* Supervisor interrupt bottom-half: handles the three-stage modeset
	 * handshake with the display hardware.  Which stage to run was
	 * latched into disp->super.pending by nv50_disp_intr().
	 */
	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 super;

	mutex_lock(&disp->super.mutex);
	/* Per-head status bits indicating which heads need servicing. */
	super = nvkm_rd32(device, 0x610030);

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super.pending, super);

	/* Stage 1: snapshot state, run OffInt1 scripts for affected heads. */
	if (disp->super.pending & 0x00000010) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000020 << head->id)))
				continue;
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	/* Stage 2: detach ORs, set VPLLs, then attach ORs, in that order. */
	if (disp->super.pending & 0x00000020) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000200 << head->id)))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	/* Stage 3: final OnInt3 scripts / OR workarounds. */
	if (disp->super.pending & 0x00000040) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Signal completion of the supervisor stage to the hardware. */
	nvkm_wr32(device, 0x610030, 0x80000000);
	mutex_unlock(&disp->super.mutex);
}
1417
/* Human-readable names for the error type field decoded from 0x610080
 * (note: value 6 has no name in this table).
 */
const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 0, "NONE" },
	{ 1, "PUSHBUFFER_ERR" },
	{ 2, "TRAP" },
	{ 3, "RESERVED_METHOD" },
	{ 4, "INVALID_ARG" },
	{ 5, "INVALID_STATE" },
	{ 7, "UNRESOLVABLE_HANDLE" },
	{}
};
1429
/* Error code names; only code 0x00 is known here, shown as an empty string. */
static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};
1435
static void
nv50_disp_intr_error(struct nvkm_disp *disp, int chid)
{
	/* Decode and log a channel exception, dump channel methods for
	 * method 0x0080, then acknowledge the interrupt.
	 */
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;	/* error code */
	u32 type = (addr & 0x00007000) >> 12;	/* error type */
	u32 mthd = (addr & 0x00000ffc);		/* faulting method offset */
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	/* Guard against hardware reporting a chid beyond our channel array. */
	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* Acknowledge the per-channel error interrupt and clear the fault. */
	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}
1469
void
nv50_disp_intr(struct nvkm_disp *disp)
{
	/* Top-level display interrupt handler: dispatches channel errors,
	 * channel events, vblank, and supervisor interrupts.
	 */
	struct nvkm_device *device = disp->engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	/* Per-channel error interrupts (bits 16..20). */
	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	/* Per-channel notification events (bits 0..4). */
	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	/* Vblank for head 0. */
	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(disp, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	/* Vblank for head 1. */
	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(disp, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	/* Supervisor interrupt: record which stage fired and defer the
	 * (potentially slow) handling to the workqueue before acking.
	 */
	if (intr1 & 0x00000070) {
		disp->super.pending = (intr1 & 0x00000070);
		queue_work(disp->super.wq, &disp->super.work);
		nvkm_wr32(device, 0x610024, disp->super.pending);
	}
}
1505
void
nv50_disp_fini(struct nvkm_disp *disp, bool suspend)
{
	/* Shut the engine down by masking all display interrupts;
	 * 'suspend' is unused here as no extra state needs saving.
	 */
	struct nvkm_device *device = disp->engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);
}
1514
int
nv50_disp_init(struct nvkm_disp *disp)
{
	/* Bring up the EVO display engine: mirror capability registers,
	 * take ownership from the VBIOS, point hardware at our instance
	 * memory, and enable supervisor interrupts.
	 *
	 * Returns 0 on success, -EBUSY if the VBIOS handoff times out.
	 */
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->heads, head) {
		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* wait up to 2s for the VBIOS to release the engine */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}
1579
int
nv50_disp_oneinit(struct nvkm_disp *disp)
{
	/* One-time constructor: enumerate heads/ORs, allocate instance
	 * memory and RAMHT, then build output-path and connector objects
	 * from the VBIOS DCB tables.
	 *
	 * Returns 0 on success, or a negative error code.
	 */
	const struct nvkm_disp_func *func = disp->func;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_outp *outp, *outt, *pair;
	struct nvkm_conn *conn;
	struct nvkm_ior *ior;
	int ret, i;
	u8  ver, hdr;
	u32 data;
	struct dcb_output dcbE;
	struct nvbios_connE connE;

	/* Windows only exist on later chipsets that provide wndw.cnt. */
	if (func->wndw.cnt) {
		disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask);
		nvkm_debug(subdev, "Window(s): %d (%08lx)\n", disp->wndw.nr, disp->wndw.mask);
	}

	/* Enumerate and construct heads. */
	disp->head.nr = func->head.cnt(disp, &disp->head.mask);
	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n", disp->head.nr, disp->head.mask);
	for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
		ret = func->head.new(disp, i);
		if (ret)
			return ret;
	}

	/* Enumerate and construct DACs, if this chipset has any. */
	if (func->dac.cnt) {
		disp->dac.nr = func->dac.cnt(disp, &disp->dac.mask);
		nvkm_debug(subdev, "   DAC(s): %d (%02lx)\n", disp->dac.nr, disp->dac.mask);
		for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
			ret = func->dac.new(disp, i);
			if (ret)
				return ret;
		}
	}

	/* Enumerate and construct PIORs, if this chipset has any. */
	if (func->pior.cnt) {
		disp->pior.nr = func->pior.cnt(disp, &disp->pior.mask);
		nvkm_debug(subdev, "  PIOR(s): %d (%02lx)\n", disp->pior.nr, disp->pior.mask);
		for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
			ret = func->pior.new(disp, i);
			if (ret)
				return ret;
		}
	}

	/* Enumerate and construct SORs (always present). */
	disp->sor.nr = func->sor.cnt(disp, &disp->sor.mask);
	nvkm_debug(subdev, "   SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
	for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
		ret = func->sor.new(disp, i);
		if (ret)
			return ret;
	}

	/* Instance memory for the display engine's objects. */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
	if (ret)
		return ret;

	/* Hash table for object lookup; chipset may override the size. */
	ret = nvkm_ramht_new(device, func->ramht_size ? func->ramht_size : 0x1000, 0, disp->inst,
			     &disp->ramht);
	if (ret)
		return ret;

	/* Create output path objects for each VBIOS display path. */
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		/* Only DCB 4.x layouts are supported here. */
		if (WARN_ON((ver & 0xf0) != 0x40))
			return -EINVAL;
		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG:
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
			ret = nvkm_outp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_DP:
			ret = nvkm_dp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_TV:
		case DCB_OUTPUT_WFD:
			/* No support for WFD yet. */
			ret = -ENODEV;
			continue;
		default:
			nvkm_warn(subdev, "dcb %d type %d unknown\n",
				  i, dcbE.type);
			continue;
		}

		if (ret) {
			if (outp) {
				if (ret != -ENODEV)
					OUTP_ERR(outp, "ctor failed: %d", ret);
				else
					OUTP_DBG(outp, "not supported");
				nvkm_outp_del(&outp);
				continue;
			}
			nvkm_error(subdev, "failed to create outp %d\n", i);
			continue;
		}

		list_add_tail(&outp->head, &disp->outps);
	}

	/* Create connector objects based on available output paths. */
	list_for_each_entry_safe(outp, outt, &disp->outps, head) {
		/* VBIOS data *should* give us the most useful information. */
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);

		/* No bios connector data... */
		if (!data) {
			/* Heuristic: anything with the same ccb index is
			 * considered to be on the same connector, any
			 * output path without an associated ccb entry will
			 * be put on its own connector.
			 */
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outps, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}

			/* Connector shared with another output path. */
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;
		} else {
			i = outp->info.connector;
		}

		/* Check that we haven't already created this connector.
		 * NOTE(review): matches on outp->info.connector even in the
		 * no-bios-data path where 'i' was set to -1 - presumably
		 * intentional, but worth confirming against conn->index
		 * assignment in nvkm_conn_new.
		 */
		list_for_each_entry(conn, &disp->conns, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;

		/* Apparently we need to create a new one! */
		ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
			nvkm_conn_del(&outp->conn);
			list_del(&outp->head);
			nvkm_outp_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conns);
	}

	/* Enforce identity-mapped SOR assignment for panels, which have
	 * certain bits (ie. backlight controls) wired to a specific SOR.
	 */
	list_for_each_entry(outp, &disp->outps, head) {
		if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
		    outp->conn->info.type == DCB_CONNECTOR_eDP) {
			ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
			if (!WARN_ON(!ior))
				ior->identity = true;
			outp->identity = true;
		}
	}

	return 0;
}
1766
/* Engine function table and user channel classes for NV50 display. */
static const struct nvkm_disp_func
nv50_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.super = nv50_disp_super,
	.uevent = &nv50_disp_chan_uevent,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
	.root = { 0, 0, NV50_DISP },
	.user = {
		{{0,0,NV50_DISP_CURSOR             }, nvkm_disp_chan_new, &nv50_disp_curs },
		{{0,0,NV50_DISP_OVERLAY            }, nvkm_disp_chan_new, &nv50_disp_oimm },
		{{0,0,NV50_DISP_BASE_CHANNEL_DMA   }, nvkm_disp_chan_new, &nv50_disp_base },
		{{0,0,NV50_DISP_CORE_CHANNEL_DMA   }, nvkm_disp_core_new, &nv50_disp_core },
		{{0,0,NV50_DISP_OVERLAY_CHANNEL_DMA}, nvkm_disp_chan_new, &nv50_disp_ovly },
		{}
	}
};
1789
/* Instantiate the NV50 display engine using the function table above. */
int
nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&nv50_disp, device, type, inst, pdisp);
}
1796