1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#include "priv.h"
23
24#include <core/memory.h>
25#include <subdev/acr.h>
26
27#include <nvfw/flcn.h>
28#include <nvfw/pmu.h>
29
/* Completion callback for the ACR BOOTSTRAP_FALCON command.
 *
 * Recovers the full reply message from the generic falcon message header
 * and returns the falcon ID echoed back by PMU firmware, so the sender
 * can check the reply matches the falcon it asked to bootstrap.
 */
static int
gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nvfw_falcon_msg *hdr)
{
	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	return msg->falcon_id;
}
37
38int
39gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
40			       enum nvkm_acr_lsf_id id)
41{
42	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
43	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
44		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
45		.cmd.hdr.size = sizeof(cmd),
46		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
47		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
48		.falcon_id = id,
49	};
50	int ret;
51
52	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
53				    gm20b_pmu_acr_bootstrap_falcon_cb,
54				    &pmu->subdev, msecs_to_jiffies(1000));
55	if (ret >= 0) {
56		if (ret != cmd.falcon_id)
57			ret = -EIO;
58		else
59			ret = 0;
60	}
61
62	return ret;
63}
64
65void
66gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
67{
68	struct loader_config hdr;
69	u64 addr;
70
71	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
72	addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
73	hdr.code_dma_base  = lower_32_bits((addr + adjust) >> 8);
74	hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
75	addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
76	hdr.data_dma_base  = lower_32_bits((addr + adjust) >> 8);
77	hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
78	addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
79	hdr.overlay_dma_base  = lower_32_bits((addr + adjust) << 8);
80	hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8);
81	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
82
83	loader_config_dump(&acr->subdev, &hdr);
84}
85
/* Write the PMU's initial bootloader descriptor into WPR at offset @bld.
 *
 * Code/data/overlay DMA bases are programmed in 256-byte units (hence the
 * ">> 8") and split into low/high 32-bit fields.  The boot argument
 * pointer (.argv) is placed at the very top of the falcon's DMEM, just
 * below data.limit, leaving room for one struct nv_pmu_args.
 */
void
gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
	const struct loader_config hdr = {
		.dma_idx = FALCON_DMAIDX_UCODE,
		.code_dma_base = lower_32_bits(code),
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lower_32_bits(data),
		.data_size = lsfw->app_resident_data_size,
		/* The overlay base shares the code base here. */
		.overlay_dma_base = lower_32_bits(code),
		.argc = 1,
		.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
		.code_dma_base1 = upper_32_bits(code),
		.data_dma_base1 = upper_32_bits(data),
		.overlay_dma_base1 = upper_32_bits(code),
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}
111
/* LS falcon hooks for the PMU on gm20b: how ACR writes/patches its
 * bootloader descriptor, and which other LS falcons this PMU's firmware
 * can bootstrap on ACR's behalf.
 */
static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
	.bld_size = sizeof(struct loader_config),
	.bld_write = gm20b_pmu_acr_bld_write,
	.bld_patch = gm20b_pmu_acr_bld_patch,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
			     BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS),
	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};
123
124static int
125gm20b_pmu_acr_init_wpr_callback(void *priv, struct nvfw_falcon_msg *hdr)
126{
127	struct nv_pmu_acr_init_wpr_region_msg *msg =
128		container_of(hdr, typeof(*msg), msg.hdr);
129	struct nvkm_pmu *pmu = priv;
130	struct nvkm_subdev *subdev = &pmu->subdev;
131
132	if (msg->error_code) {
133		nvkm_error(subdev, "ACR WPR init failure: %d\n",
134			   msg->error_code);
135		return -EINVAL;
136	}
137
138	nvkm_debug(subdev, "ACR WPR init complete\n");
139	complete_all(&pmu->wpr_ready);
140	return 0;
141}
142
143static int
144gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
145{
146	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
147		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
148		.cmd.hdr.size = sizeof(cmd),
149		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
150		.region_id = 1,
151		.wpr_offset = 0,
152	};
153
154	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
155				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
156}
157
/* Parse the INIT message PMU firmware posts after boot.
 *
 * The message describes the firmware's queues; use it to initialise the
 * host-side command/message queue objects, then kick off WPR region
 * initialisation.  Returns -EINVAL if the message is not the expected
 * INIT message from the INIT unit.
 */
static int
gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
{
	struct nv_pmu_init_msg msg;
	int ret;

	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
		return -EINVAL;

	/* queue_info slot 0 carries the high-priority command queue,
	 * slot 1 the low-priority command queue, and slot 4 the message
	 * queue firmware uses to talk back to the host.
	 */
	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
					msg.queue_info[0].offset,
					msg.queue_info[0].size);
	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
					msg.queue_info[1].offset,
					msg.queue_info[1].size);
	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
					 msg.queue_info[4].offset,
					 msg.queue_info[4].size);
	return gm20b_pmu_acr_init_wpr(pmu);
}
183
184static void
185gm20b_pmu_recv(struct nvkm_pmu *pmu)
186{
187	if (!pmu->initmsg_received) {
188		int ret = pmu->func->initmsg(pmu);
189		if (ret) {
190			nvkm_error(&pmu->subdev, "error parsing init message: %d\n", ret);
191			return;
192		}
193
194		pmu->initmsg_received = true;
195	}
196
197	nvkm_falcon_msgq_recv(pmu->msgq);
198}
199
/* Tear down host-side PMU state before the falcon is released.
 *
 * The receive worker is flushed first so no callback races the queue
 * teardown; wpr_ready is re-armed so the next init waits for a fresh
 * WPR-init completion from firmware.
 */
static void
gm20b_pmu_fini(struct nvkm_pmu *pmu)
{
	/*TODO: shutdown RTOS. */

	flush_work(&pmu->recv.work);
	nvkm_falcon_cmdq_fini(pmu->lpq);
	nvkm_falcon_cmdq_fini(pmu->hpq);

	reinit_completion(&pmu->wpr_ready);

	nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
}
213
/* Acquire the PMU falcon, hand the firmware its boot arguments, and
 * start it running.
 *
 * The args blob is written at the top of DMEM (data.limit minus its
 * size), the same location the bootloader descriptor's .argv points at.
 */
static int
gm20b_pmu_init(struct nvkm_pmu *pmu)
{
	struct nvkm_falcon *falcon = &pmu->falcon;
	struct nv_pmu_args args = { .secure_mode = true };
	u32 addr_args = falcon->data.limit - sizeof(args);
	int ret;

	ret = nvkm_falcon_get(&pmu->falcon, &pmu->subdev);
	if (ret)
		return ret;

	/* Firmware will post a fresh INIT message once started. */
	pmu->initmsg_received = false;

	nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
	nvkm_falcon_start(falcon);
	return 0;
}
232
/* PMU implementation for gm20b; reuses gm200 falcon ops, gt215 interrupt
 * handling and gf100 reset where those generations match.
 */
const struct nvkm_pmu_func
gm20b_pmu = {
	.flcn = &gm200_pmu_flcn,
	.init = gm20b_pmu_init,
	.fini = gm20b_pmu_fini,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
	.reset = gf100_pmu_reset,
};
243
/* Firmware images required for gm20b (Tegra X1) PMU secure boot. */
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif
249
/* Load the PMU's LS firmware (descriptor, image and signature) and
 * register it with ACR under the PMU falcon ID.
 */
int
gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
						 NVKM_ACR_LSF_PMU, "pmu/",
						 ver, fwif->acr);
}
257
/* Firmware interface table: prefer signed firmware (version 0); fall
 * back to the no-firmware path if none is available.
 */
static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
	{  0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
	{ -1, gm200_pmu_nofw, &gm20b_pmu },
	{}
};
264
/* Constructor: instantiate the gm20b PMU subdev from the fwif table. */
int
gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu);
}
271