/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>

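/**
 * nvkm_falcon_intr_retrigger() - ask the falcon to re-send a pending interrupt
 * @falcon: falcon to poke
 *
 * No-op on implementations that do not provide an intr_retrigger() hook.
 */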
void
nvkm_falcon_intr_retrigger(struct nvkm_falcon *falcon)
{
	if (falcon->func->intr_retrigger)
		falcon->func->intr_retrigger(falcon);
}

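/**
 * nvkm_falcon_riscv_active() - check whether the core is running as RISC-V
 * @falcon: falcon to query
 *
 * Returns false on implementations without a riscv_active() hook.
 */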
bool
nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
{
	if (!falcon->func->riscv_active)
		return false;

	return falcon->func->riscv_active(falcon);
}

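/* Select the DMA hooks matching the target memory aperture, if any. */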
static const struct nvkm_falcon_func_dma *
nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
	switch (*mem_type) {
	case IMEM: return falcon->func->imem_dma;
	case DMEM: return falcon->func->dmem_dma;
	default:
		return NULL;
	}
}

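/**
 * nvkm_falcon_dma_wr() - copy a buffer into falcon memory via the DMA engine
 * @falcon: target falcon
 * @img: CPU copy of the data, only used for trace-level debug dumps
 * @dma_addr: DMA address of the buffer
 * @dma_base: offset of the payload within the buffer
 * @mem_type: destination aperture (IMEM or DMEM)
 * @mem_base: destination offset within that aperture
 * @len: number of bytes to copy, must be a non-zero multiple of 256
 * @sec: passed through to the implementation's init() hook
 *
 * Returns 0 on success, -EINVAL on bad parameters, or -ETIMEDOUT if the
 * hardware does not signal completion of a 256-byte chunk in time.
 */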
int
nvkm_falcon_dma_wr(struct nvkm_falcon *falcon, const u8 *img, u64 dma_addr, u32 dma_base,
		   enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec)
{
	const struct nvkm_falcon_func_dma *dma = nvkm_falcon_dma(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	const int dmalen = 256;
	u32 dma_start = 0;
	u32 dst, src, cmd;
	int ret, i;

	if (WARN_ON(!dma || !dma->xfer))
		return -EINVAL;

	if (mem_type == DMEM) {
		dma_start = dma_base;
		dma_addr += dma_base;
	}

	FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x (%010llx %08x)",
		 type, mem_base, len, dma_base, dma_addr - dma_base, dma_start);
	if (WARN_ON(!len || (len & (dmalen - 1))))
		return -EINVAL;

	ret = dma->init(falcon, dma_addr, dmalen, mem_type, sec, &cmd);
	if (ret)
		return ret;

	dst = mem_base;
	src = dma_base;
	if (len) {
		while (len >= dmalen) {
			dma->xfer(falcon, dst, src - dma_start, cmd);

			if (img && nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
				for (i = 0; i < dmalen; i += 4, mem_base += 4) {
					const int w = 8, x = (i / 4) % w;

					if (x == 0)
						printk(KERN_INFO "%s %08x <-", type, mem_base);
					printk(KERN_CONT " %08x", *(u32 *)(img + src + i));
					if (x == (w - 1) || ((i + 4) == dmalen))
						printk(KERN_CONT " <- %08x+%08x", dma_base,
						       src + i - dma_base - (x * 4));
					if (i == (7 * 4))
						printk(KERN_CONT " *");
				}
			}

			if (nvkm_msec(falcon->owner->device, 2000,
				if (dma->done(falcon))
					break;
			) < 0)
				return -ETIMEDOUT;

			src += dmalen;
			dst += dmalen;
			len -= dmalen;
		}
		WARN_ON(len);
	}

	return 0;
}

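/* Select the PIO hooks matching the target memory aperture, remapping DMEM
 * accesses at or above emem_addr to the EMEM aperture where present.
 */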
static const struct nvkm_falcon_func_pio *
nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{
	switch (*mem_type) {
	case IMEM:
		return falcon->func->imem_pio;
	case DMEM:
		if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr)
			return falcon->func->dmem_pio;

		*mem_base -= falcon->func->emem_addr;
		fallthrough;
	case EMEM:
		return falcon->func->emem_pio;
	default:
		return NULL;
	}
}

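/**
 * nvkm_falcon_pio_rd() - read falcon memory into a buffer using PIO
 * @falcon: falcon to read from
 * @port: PIO port to use
 * @mem_type: source aperture (IMEM, DMEM or EMEM)
 * @mem_base: source offset within that aperture
 * @img: destination buffer
 * @img_base: offset within @img, reported in the debug message only
 * @len: number of bytes to read, must be a non-zero multiple of pio->min
 *
 * Returns 0 on success, or -EINVAL on bad parameters.
 */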
int
nvkm_falcon_pio_rd(struct nvkm_falcon *falcon, u8 port, enum nvkm_falcon_mem mem_type, u32 mem_base,
		   const u8 *img, u32 img_base, int len)
{
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->rd))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x -> %08x bytes at %08x", type, mem_base, len, img_base);
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->rd_init(falcon, port, mem_base);
	do {
		xfer_len = min(len, pio->max);
		pio->rd(falcon, port, img, xfer_len);

		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x ->", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}

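/**
 * nvkm_falcon_pio_wr() - write a buffer into falcon memory using PIO
 * @falcon: target falcon
 * @img: source buffer
 * @img_base: offset within @img, reported in the debug message only
 * @port: PIO port to use
 * @mem_type: destination aperture (IMEM, DMEM or EMEM)
 * @mem_base: destination offset within that aperture
 * @len: number of bytes to write, must be a non-zero multiple of pio->min
 * @tag: IMEM tag of the first block, incremented for each transfer
 * @sec: passed through to the implementation's wr_init() hook
 *
 * Returns 0 on success, or -EINVAL on bad parameters.
 */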
int
nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port,
		   enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec)
{
	const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base);
	const char *type = nvkm_falcon_mem(mem_type);
	int xfer_len;

	if (WARN_ON(!pio || !pio->wr))
		return -EINVAL;

	FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base);
	if (WARN_ON(!len || (len & (pio->min - 1))))
		return -EINVAL;

	pio->wr_init(falcon, port, sec, mem_base);
	do {
		xfer_len = min(len, pio->max);
		pio->wr(falcon, port, img, xfer_len, tag++);

		if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
			for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) {
				if (((img_base / 4) % 8) == 0)
					printk(KERN_INFO "%s %08x <-", type, mem_base);
				printk(KERN_CONT " %08x", *(u32 *)(img + img_base));
				if ((img_base / 4) == 7 && mem_type == IMEM)
					printk(KERN_CONT " %04x", tag - 1);
			}
		}

		img += xfer_len;
		len -= xfer_len;
	} while (len);

	return 0;
}

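/**
 * nvkm_falcon_load_imem() - load code into the falcon's IMEM
 * @falcon: target falcon
 * @data: code to load
 * @start: IMEM address to start writing at
 * @size: size of the data, in bytes
 * @tag: IMEM tag of the first block
 * @port: IMEM port to use
 * @secure: request a secure load; rejected if the falcon has no secret level
 */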
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u16 tag, u8 port, bool secure)
{
	if (secure && !falcon->secret) {
		nvkm_warn(falcon->user,
			  "writing with secure tag on a non-secure falcon!\n");
		return;
	}

	falcon->func->load_imem(falcon, data, start, size, tag, port,
				secure);
}

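/**
 * nvkm_falcon_load_dmem() - write data into the falcon's DMEM
 * @falcon: target falcon
 * @data: data to write
 * @start: DMEM address to start writing at
 * @size: size of the data, in bytes
 * @port: DMEM port to use
 *
 * Serialized against other DMEM accesses by falcon->dmem_mutex.
 */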
void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u8 port)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->load_dmem(falcon, data, start, size, port);

	mutex_unlock(&falcon->dmem_mutex);
}

void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
	falcon->func->start(falcon);
}

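/**
 * nvkm_falcon_reset() - disable and re-enable the falcon
 * @falcon: falcon to reset
 *
 * Returns 0 on success, or a negative error code if either step fails.
 */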
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	int ret;

	ret = falcon->func->disable(falcon);
	if (WARN_ON(ret))
		return ret;

	return nvkm_falcon_enable(falcon);
}

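/* One-time setup: resolve the unit's base address if it was not provided,
 * then cache version, secret level, port counts, code/data limits and the
 * debug-mode bit from hardware.
 */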
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	if (func->debug) {
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	return 0;
}

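/**
 * nvkm_falcon_put() - release a falcon acquired with nvkm_falcon_get()
 * @falcon: falcon to release, may be NULL
 * @user: subdev that acquired it; ignored if it is not the current user
 */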
void
nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
	if (unlikely(!falcon))
		return;

	mutex_lock(&falcon->mutex);
	if (falcon->user == user) {
		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
		falcon->user = NULL;
	}
	mutex_unlock(&falcon->mutex);
}

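/**
 * nvkm_falcon_get() - acquire exclusive use of a falcon
 * @falcon: falcon to acquire
 * @user: subdev acquiring it
 *
 * Performs one-time initialisation on first acquisition.  Returns 0 on
 * success, or -EBUSY if the falcon is already held by another subdev.
 */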
int
nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
{
	int ret = 0;

	mutex_lock(&falcon->mutex);
	if (falcon->user) {
		nvkm_error(user, "%s falcon already acquired by %s!\n",
			   falcon->name, falcon->user->name);
		mutex_unlock(&falcon->mutex);
		return -EBUSY;
	}

	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
	if (!falcon->oneinit)
		ret = nvkm_falcon_oneinit(falcon);
	falcon->user = user;
	mutex_unlock(&falcon->mutex);
	return ret;
}

void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}

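/**
 * nvkm_falcon_ctor() - initialise the common falcon state
 * @func: hardware-specific hooks
 * @subdev: owning subdev
 * @name: name used in log messages
 * @addr: base address of the falcon's registers
 * @falcon: structure to initialise
 */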
int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
		 struct nvkm_subdev *subdev, const char *name, u32 addr,
		 struct nvkm_falcon *falcon)
{
	falcon->func = func;
	falcon->owner = subdev;
	falcon->name = name;
	falcon->addr = addr;
	falcon->addr2 = func->addr2;
	mutex_init(&falcon->mutex);
	mutex_init(&falcon->dmem_mutex);
	return 0;
}
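
/*
 * Illustrative usage sketch (not taken from an in-tree caller; the firmware
 * buffers, sizes and caller context below are assumptions):
 *
 *	ret = nvkm_falcon_get(falcon, subdev);
 *	if (ret)
 *		return ret;
 *
 *	ret = nvkm_falcon_reset(falcon);
 *	if (ret == 0) {
 *		nvkm_falcon_load_imem(falcon, code, 0x0000, code_size, 0, 0, false);
 *		nvkm_falcon_load_dmem(falcon, data, 0x0000, data_size, 0);
 *		nvkm_falcon_start(falcon);
 *	}
 *
 *	nvkm_falcon_put(falcon, subdev);
 */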