// SPDX-License-Identifier: MIT
#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"

struct nvkm_memx {
	struct nvkm_pmu *pmu;
	u32 base;
	u32 size;
	struct {
		u32 mthd;
		u32 size;
		u32 data[64];
	} c;
};

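/*
 * Flush the currently buffered script entry (method header plus payload
 * words) into the MEMX data segment through the PMU data port, then reset
 * the buffer so a new method can be accumulated.
 */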
static void
memx_out(struct nvkm_memx *memx)
{
	struct nvkm_device *device = memx->pmu->subdev.device;
	int i;

	if (memx->c.mthd) {
		nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
		for (i = 0; i < memx->c.size; i++)
			nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
		memx->c.mthd = 0;
		memx->c.size = 0;
	}
}

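/*
 * Queue a command for the script.  The buffer is flushed first if the new
 * payload would overflow it, or if the method changes, since consecutive
 * payload words for the same method are packed into a single entry.
 */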
static void
memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
{
	if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
	    (memx->c.mthd && memx->c.mthd != mthd))
		memx_out(memx);
	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
	memx->c.size += size;
	memx->c.mthd  = mthd;
}

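/*
 * Begin recording a MEMX script: ask the MEMX process where its data
 * segment lives, allocate the recorder state, acquire access to that
 * segment (spinning on 0x10a580 until the PMU grants it), and point the
 * PMU data port at the segment base so entries can be streamed in.
 */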
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_memx *memx;
	u32 reply[2];
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_DATA, 0);
	if (ret)
		return ret;

	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
	if (!memx)
		return -ENOMEM;
	memx->pmu = pmu;
	memx->base = reply[0];
	memx->size = reply[1];

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000003);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
	return 0;
}

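/*
 * Finish recording: flush any buffered entry, release the data segment,
 * and, if requested, ask the MEMX process to execute the recorded script,
 * logging how long execution took.
 */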
int
nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
	struct nvkm_memx *memx = *pmemx;
	struct nvkm_pmu *pmu = memx->pmu;
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 finish, reply[2];

	/* flush the cache... */
	memx_out(memx);

	/* release data segment access */
	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* call MEMX process to execute the script, and wait for reply */
	if (exec) {
		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
			      memx->base, finish);
		nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
			   reply[0], reply[1]);
	}

	kfree(memx);
	return 0;
}

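/* Record a 32-bit register write. */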
void
nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
}

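/*
 * Record a masked register poll with a timeout.  Flushed immediately, as
 * the PMU microcode cannot handle more than one wait per script entry.
 */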
void
nvkm_memx_wait(struct nvkm_memx *memx,
		  u32 addr, u32 mask, u32 data, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
		   addr, mask, data, nsec);
	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
	memx_out(memx); /* fuc can't handle multiple */
}

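/* Record a delay; flushed immediately for the same reason as waits. */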
void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
	memx_out(memx); /* fuc can't handle multiple */
}

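/*
 * Record a wait for vblank.  On chipsets prior to 0xd0 the active head
 * with the largest resolution is chosen to sync against; if no active
 * head is found (or on newer chipsets, which this heuristic does not
 * cover), the command is skipped.
 */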
void
nvkm_memx_wait_vblank(struct nvkm_memx *memx)
{
	struct nvkm_subdev *subdev = &memx->pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	if (device->chipset < 0xd0) {
		heads = nvkm_rd32(device, 0x610050);
		for (i = 0; i < 2; i++) {
			/* Heuristic: sync to head with biggest resolution */
			if (heads & (2 << (i << 3))) {
				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
				y = (x & 0xffff0000) >> 16;
				x &= 0x0000ffff;
				if ((x * y) > px) {
					px = (x * y);
					head_sync = i;
				}
			}
		}
	}

	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
	memx_out(memx); /* fuc can't handle multiple */
}

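/* Record a memory (re)training request. */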
void
nvkm_memx_train(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, "   MEM TRAIN\n");
	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
}

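/*
 * Fetch the training results published by the MEMX process: query where
 * the result packet lives in PMU data memory, check that it fits in the
 * caller's buffer, then read it out through the data port.
 */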
int
nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
{
	struct nvkm_device *device = pmu->subdev.device;
	u32 reply[2], base, size, i;
	int ret;

	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_TRAIN, 0);
	if (ret)
		return ret;

	base = reply[0];
	size = reply[1] >> 2;
	if (size > rsize)
		return -ENOMEM;

	/* read the packet */
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);

	for (i = 0; i < size; i++)
		res[i] = nvkm_rd32(device, 0x10a1c4);

	return 0;
}

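/* Record the point at which host access to memory must be blocked. */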
void
nvkm_memx_block(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, "   HOST BLOCKED\n");
	memx_cmd(memx, MEMX_ENTER, 0, NULL);
}

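/* Record the point at which host access may resume. */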
void
nvkm_memx_unblock(struct nvkm_memx *memx)
{
	nvkm_debug(&memx->pmu->subdev, "   HOST UNBLOCKED\n");
	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
}
#endif