/*	$NetBSD: nouveau_nvkm_engine_disp_base.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_disp_base.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $");

#include "priv.h"
#include "conn.h"
#include "dp.h"
#include "head.h"
#include "ior.h"
#include "outp.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/oproxy.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

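/* Per-head vblank event: the constructor validates the requested head,
 * and the init/fini callbacks arm and disarm vblank reporting on it.
 */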
static void
nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_put(head);
}

static void
nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_get(head);
}

static int
nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), vblank);
	union {
		struct nvif_notify_head_req_v0 v0;
	} *req = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_head_rep_v0);
		if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
			notify->types = 1;
			notify->index = req->v0.head;
			return 0;
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};

void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
	struct nvif_notify_head_rep_v0 rep = {};
	nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
}

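/* Connector hot-plug (HPD) event: the constructor checks that the
 * requested connector exists and has a hot-plug interrupt source
 * before a notifier may be attached.
 */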
static int
nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
		   struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), hpd);
	union {
		struct nvif_notify_conn_req_v0 v0;
	} *req = data;
	struct nvkm_outp *outp;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_conn_rep_v0);
		list_for_each_entry(outp, &disp->outp, head) {
			if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
				if (ret = -ENODEV, outp->conn->hpd.event) {
					notify->types = req->v0.mask;
					notify->index = req->v0.conn;
					ret = 0;
				}
				break;
			}
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_hpd_func = {
	.ctor = nvkm_disp_hpd_ctor
};

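/* Resolve an NV04_DISP_NTFY_* notifier type to the event source backing it. */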
int
nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
{
	struct nvkm_disp *disp = nvkm_disp(object->engine);
	switch (type) {
	case NV04_DISP_NTFY_VBLANK:
		*event = &disp->vblank;
		return 0;
	case NV04_DISP_NTFY_CONN:
		*event = &disp->hpd;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

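/* The root display class is exclusive: only one client may own it at a
 * time.  Ownership is tracked in disp->client, refused with -EBUSY if
 * already taken, and cleared when the proxy object is destroyed.
 */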
static void
nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client == oproxy)
		disp->client = NULL;
	mutex_unlock(&disp->engine.subdev.mutex);
}

static const struct nvkm_oproxy_func
nvkm_disp_class = {
	.dtor[1] = nvkm_disp_class_del,
};

static int
nvkm_disp_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_disp_oclass *sclass = oclass->engn;
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	struct nvkm_oproxy *oproxy;
	int ret;

	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
	if (ret)
		return ret;
	*pobject = &oproxy->base;

	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client) {
		mutex_unlock(&disp->engine.subdev.mutex);
		return -EBUSY;
	}
	disp->client = oproxy;
	mutex_unlock(&disp->engine.subdev.mutex);

	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
}

static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};

static int
nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	if (index == 0) {
		const struct nvkm_disp_oclass *root = disp->func->root(disp);
		oclass->base = root->base;
		oclass->engn = root;
		*class = &nvkm_disp_sclass;
		return 0;
	}
	return 1;
}

static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	disp->func->intr(disp);
}

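/* Suspend/teardown path: run the chipset-specific fini first, then shut
 * down every output path and connector.
 */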
static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;

	if (disp->func->fini)
		disp->func->fini(disp);

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_outp_fini(outp);
	}

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_conn_fini(conn);
	}

	return 0;
}

static int
nvkm_disp_init(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_conn_init(conn);
	}

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_outp_init(outp);
	}

	if (disp->func->init) {
		int ret = disp->func->init(disp);
		if (ret)
			return ret;
	}

	/* Set 'normal' (ie. when it's attached to a head) state for
	 * each output resource to 'fully enabled'.
	 */
	list_for_each_entry(ior, &disp->ior, head) {
		ior->func->power(ior, true, true, true, true, true);
	}

	return 0;
}

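/* One-time setup: parse the VBIOS DCB display paths into output objects,
 * derive connector objects from them, and register the HPD and vblank
 * event sources.
 */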
static int
nvkm_disp_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp, *outt, *pair;
	struct nvkm_conn *conn;
	struct nvkm_head *head;
	struct nvkm_ior *ior;
	struct nvbios_connE connE;
	struct dcb_output dcbE;
	u8  hpd = 0, ver, hdr;
	u32 data;
	int ret, i;

	/* Create output path objects for each VBIOS display path. */
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		if (ver < 0x40) /* No support for chipsets prior to NV50. */
			break;
		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG:
		case DCB_OUTPUT_TV:
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
			ret = nvkm_outp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_DP:
			ret = nvkm_dp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_WFD:
			/* No support for WFD yet. */
			ret = -ENODEV;
			continue;
		default:
			nvkm_warn(subdev, "dcb %d type %d unknown\n",
				  i, dcbE.type);
			continue;
		}

		if (ret) {
			if (outp) {
				if (ret != -ENODEV)
					OUTP_ERR(outp, "ctor failed: %d", ret);
				else
					OUTP_DBG(outp, "not supported");
				nvkm_outp_del(&outp);
				continue;
			}
			nvkm_error(subdev, "failed to create outp %d\n", i);
			continue;
		}

		list_add_tail(&outp->head, &disp->outp);
		hpd = max(hpd, (u8)(dcbE.connector + 1));
	}

	/* Create connector objects based on available output paths. */
	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
		/* VBIOS data *should* give us the most useful information. */
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);

		/* No bios connector data... */
		if (!data) {
			/* Heuristic: anything with the same ccb index is
			 * considered to be on the same connector, any
			 * output path without an associated ccb entry will
			 * be put on its own connector.
			 */
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outp, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}

			/* Connector shared with another output path. */
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;
		} else {
			i = outp->info.connector;
		}

		/* Check that we haven't already created this connector. */
		list_for_each_entry(conn, &disp->conn, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;

		/* Apparently we need to create a new one! */
		ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(&disp->engine.subdev,
				   "failed to create outp %d conn: %d\n",
				   outp->index, ret);
			nvkm_conn_del(&outp->conn);
			list_del(&outp->head);
			nvkm_outp_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conn);
	}

	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
	if (ret)
		return ret;

	if (disp->func->oneinit) {
		ret = disp->func->oneinit(disp);
		if (ret)
			return ret;
	}

	/* Enforce identity-mapped SOR assignment for panels, which have
	 * certain bits (ie. backlight controls) wired to a specific SOR.
	 */
	list_for_each_entry(outp, &disp->outp, head) {
		if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
		    outp->conn->info.type == DCB_CONNECTOR_eDP) {
			ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
			if (!WARN_ON(!ior))
				ior->identity = true;
			outp->identity = true;
		}
	}

	i = 0;
	list_for_each_entry(head, &disp->head, head)
		i = max(i, head->id + 1);

	return nvkm_event_init(&nvkm_disp_vblank_func, 1, i, &disp->vblank);
}

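/* Destructor: tear down the event sources and free every connector,
 * output path, output resource (IOR) and head still on the lists.
 */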
static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	void *data = disp;

	if (disp->func->dtor)
		data = disp->func->dtor(disp);

	nvkm_event_fini(&disp->vblank);
	nvkm_event_fini(&disp->hpd);

	while (!list_empty(&disp->conn)) {
		conn = list_first_entry(&disp->conn, typeof(*conn), head);
		list_del(&conn->head);
		nvkm_conn_del(&conn);
	}

	while (!list_empty(&disp->outp)) {
		outp = list_first_entry(&disp->outp, typeof(*outp), head);
		list_del(&outp->head);
		nvkm_outp_del(&outp);
	}

	while (!list_empty(&disp->ior)) {
		struct nvkm_ior *ior =
			list_first_entry(&disp->ior, typeof(*ior), head);
		nvkm_ior_del(&ior);
	}

	while (!list_empty(&disp->head)) {
		struct nvkm_head *head =
			list_first_entry(&disp->head, typeof(*head), head);
		nvkm_head_del(&head);
	}

	return data;
}

static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.oneinit = nvkm_disp_oneinit,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};

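/* Common constructor shared by the chipset-specific display
 * implementations; nvkm_disp_new_() allocates and then delegates here.
 */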
int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp *disp)
{
	disp->func = func;
	INIT_LIST_HEAD(&disp->head);
	INIT_LIST_HEAD(&disp->ior);
	INIT_LIST_HEAD(&disp->outp);
	INIT_LIST_HEAD(&disp->conn);
	return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
}

int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp **pdisp)
{
	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_disp_ctor(func, device, index, *pdisp);
}