/*
 * Copyright 2022 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/timer.h>

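/*
 * Dump the falcon's PC trace buffer to the log, for debugging firmware that
 * fails to boot or halts unexpectedly.  The entry count comes from bits
 * 16:23 of the trace index register; 0x240 and 0x148/0x14c are assumed to
 * be the usual SCTL and TRACEIDX/TRACEPC offsets.
 */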
void
gm200_flcn_tracepc(struct nvkm_falcon *falcon)
{
	u32 sctl = nvkm_falcon_rd32(falcon, 0x240);
	u32 tidx = nvkm_falcon_rd32(falcon, 0x148);
	int nr = (tidx & 0x00ff0000) >> 16, sp, ip;

	FLCN_ERR(falcon, "TRACEPC SCTL %08x TIDX %08x", sctl, tidx);
	for (sp = 0; sp < nr; sp++) {
		nvkm_falcon_wr32(falcon, 0x148, sp);
		ip = nvkm_falcon_rd32(falcon, 0x14c);
		FLCN_ERR(falcon, "TRACEPC: %08x", ip);
	}
}

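/*
 * DMEM PIO accessors.  0x1c0/0x1c4 (per-port, stride 8) are assumed to be
 * the standard falcon DMEMC/DMEMD pair, with BIT(25)/BIT(24) selecting
 * auto-increment on read/write.  Reads handle a non-dword-aligned tail for
 * the Tegra PMU firmware's init message; writes require dword multiples.
 */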
static void
gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
{
	while (len >= 4) {
		*(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
		img += 4;
		len -= 4;
	}

	/* Sigh.  Tegra PMU FW's init message... */
	if (len) {
		u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

		while (len--) {
			*(u8 *)img++ = data & 0xff;
			data >>= 8;
		}
	}
}

static void
gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
{
	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base);
}

static void
gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
	while (len >= 4) {
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img);
		img += 4;
		len -= 4;
	}

	WARN_ON(len);
}

static void
gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base)
{
	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base);
}

const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
	.min = 1,
	.max = 0x100,
	.wr_init = gm200_flcn_pio_dmem_wr_init,
	.wr = gm200_flcn_pio_dmem_wr,
	.rd_init = gm200_flcn_pio_dmem_rd_init,
	.rd = gm200_flcn_pio_dmem_rd,
};

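/*
 * IMEM PIO accessors.  0x180/0x184/0x188 (per-port, stride 0x10) are
 * assumed to be IMEMC/IMEMD/IMEMT; BIT(24) enables auto-increment and
 * BIT(28) marks the block secure.  IMEM is written as whole, tagged
 * 256-byte blocks, hence .min == .max == 0x100 below.
 */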
static void
gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base)
{
	nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? BIT(28) : 0) | BIT(24) | imem_base);
}

static void
gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
	nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
	while (len >= 4) {
		nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
		img += 4;
		len -= 4;
	}
}

const struct nvkm_falcon_func_pio
gm200_flcn_imem_pio = {
	.min = 0x100,
	.max = 0x100,
	.wr_init = gm200_flcn_pio_imem_wr_init,
	.wr = gm200_flcn_pio_imem_wr,
};

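/*
 * Read back the falcon's context-bind status from bits 12:14 of 0x0dc.
 * When @intr is set, fail unless bit 3 of IRQSTAT (assumed to be the CTXSW
 * interrupt) is pending.
 */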
int
gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
{
	if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008))
		return -1;

	return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12;
}

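/*
 * Bind an instance block so the falcon can reach memory through the GPU's
 * MMU: 0x054 takes the aperture and 4KiB-aligned address along with what
 * appears to be a valid/trigger bit; the follow-up writes to 0x090/0x0a4
 * are assumed to be additional enables.
 */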
void
gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
{
	nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */
	nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12));
	nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008);
}

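/*
 * After reset the falcon scrubs its IMEM/DMEM; bits 1:2 of 0x10c (assumed
 * to be DMACTL's scrubbing status) read back as set until that completes.
 * Poll for up to 10ms before giving up.
 */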
int
gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
	nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);

	if (nvkm_msec(falcon->owner->device, 10,
		if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}

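/*
 * Bring the falcon out of reset: run the engine-specific reset hook, select
 * the core where required, ungate via PMC for falcons reset through it, and
 * wait for memory scrubbing to finish.  PMC_BOOT_0 (the chip ID at PMC
 * offset 0x000000) is then stashed in falcon scratch register 0x084,
 * presumably for firmware to read back.
 */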
int
gm200_flcn_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	if (falcon->func->select) {
		ret = falcon->func->select(falcon);
		if (ret)
			return ret;
	}

	if (falcon->func->reset_pmc)
		nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);

	ret = falcon->func->reset_wait_mem_scrubbing(falcon);
	if (ret)
		return ret;

	nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000));
	return 0;
}

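/*
 * Halt and disable the falcon: switch off the context/method interfaces
 * (0x048, assumed to be ITFEN), mask all falcon interrupts via 0x014
 * (assumed to be IRQMCLR), then reset through PMC and/or the
 * engine-specific hook.
 */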
int
gm200_flcn_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	if (falcon->func->select) {
		ret = falcon->func->select(falcon);
		if (ret)
			return ret;
	}

	nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000);
	nvkm_falcon_wr32(falcon, 0x014, 0xffffffff);

	if (falcon->func->reset_pmc) {
		if (falcon->func->reset_prep) {
			ret = falcon->func->reset_prep(falcon);
			if (ret)
				return ret;
		}

		nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
	}

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	return 0;
}

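/*
 * Start a loaded firmware and wait for it to finish: seed MAILBOX0/1
 * (0x040/0x044, with 0xcafebeef as a poison value when no mbox0 is given),
 * set the boot vector, start the CPU via CPUCTL, and poll up to 2s for the
 * halted bit.  Success is judged by MAILBOX0 matching @mbox0_ok; @irqsclr,
 * if non-zero, clears any interrupts the firmware left pending.
 */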
int
gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
{
	struct nvkm_falcon *falcon = fw->falcon;
	u32 mbox0, mbox1;
	int ret = 0;

	nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? *pmbox0 : 0xcafebeef);
	if (pmbox1)
		nvkm_falcon_wr32(falcon, 0x044, *pmbox1);

	nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr);
	nvkm_falcon_wr32(falcon, 0x100, 0x00000002);

	if (nvkm_msec(falcon->owner->device, 2000,
		if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010)
			break;
	) < 0)
		ret = -ETIMEDOUT;

	mbox0 = nvkm_falcon_rd32(falcon, 0x040);
	mbox1 = nvkm_falcon_rd32(falcon, 0x044);
	if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1))
		ret = ret ?: -EIO;

	if (irqsclr)
		nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr);

	return ret;
}

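/*
 * Load firmware into the falcon.  With an instance block, enable the
 * context interface, bind the block, and poll until the bind status reads
 * back as 5 (taken here to mean "bound"); the CTXSW interrupt is then
 * acknowledged, a trigger bit set in 0x058, and the status polled back to
 * idle.  Without an instance block, the context requirement is dropped
 * instead.  A separate boot loader, if present, goes at the top of IMEM and
 * is handed off to the per-firmware load_bld() hook; otherwise the
 * non-secure IMEM, secure IMEM, and DMEM sections are PIO-written directly.
 */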
int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
	struct nvkm_falcon *falcon = fw->falcon;
	int target, ret;

	if (fw->inst) {
		nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);

		switch (nvkm_memory_target(fw->inst)) {
		case NVKM_MEM_TARGET_VRAM: target = 0; break;
		case NVKM_MEM_TARGET_HOST: target = 2; break;
		case NVKM_MEM_TARGET_NCOH: target = 3; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst));

		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5)
				break;
		) < 0)
			return -ETIMEDOUT;

		nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
		nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, false) == 0)
				break;
		) < 0)
			return -ETIMEDOUT;
	} else {
		nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
		nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
	}

	if (fw->boot) {
		switch (nvkm_memory_target(&fw->fw.mem.memory)) {
		case NVKM_MEM_TARGET_VRAM: target = 4; break;
		case NVKM_MEM_TARGET_HOST: target = 5; break;
		case NVKM_MEM_TARGET_NCOH: target = 6; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
					 IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
					 fw->boot_addr >> 8, false);
		if (ret)
			return ret;

		return fw->func->load_bld(fw);
	}

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0,
				 IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false);
	if (ret)
		return ret;

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0,
				 IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true);
	if (ret)
		return ret;

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0,
				 DMEM, fw->dmem_base, fw->dmem_size, 0, false);
	if (ret)
		return ret;

	return 0;
}

int
gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw)
{
	return nvkm_falcon_reset(fw->falcon);
}

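/*
 * Select which signature to patch into the firmware image: if the falcon
 * exposes a debug-mode register and its bit 20 is set, use the debug
 * signature (return 1); otherwise keep the production one (return 0).
 */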
int
gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
{
	struct nvkm_falcon *falcon = fw->falcon;
	u32 addr = falcon->func->debug;
	int ret = 0;

	if (addr) {
		ret = nvkm_falcon_enable(falcon);
		if (ret)
			return ret;

		if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) {
			*sig_base_src = fw->sig_base_dbg;
			return 1;
		}
	}

	return ret;
}

const struct nvkm_falcon_fw_func
gm200_flcn_fw = {
	.signature = gm200_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.load = gm200_flcn_fw_load,
	.boot = gm200_flcn_fw_boot,
};