/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <core/firmware.h>
#include <subdev/acr.h>

#include <nvfw/flcn.h>

#include <nvif/class.h>

int
gm200_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	nvkm_warn(&gr->base.engine.subdev, "firmware unavailable\n");
	return -ENODEV;
}

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

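/* ACR bootloader-descriptor patching: re-read the descriptor from the WPR
 * image, offset its code/data DMA bases by the signed "adjust" value (used
 * when the firmware is relocated within WPR), write it back, and dump it
 * for debugging.
 */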
static void
gm200_gr_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct flcn_bl_dmem_desc_v1 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	flcn_bl_dmem_desc_v1_dump(&acr->subdev, &hdr);
}

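/* Write a fresh bootloader DMEM descriptor for an LS falcon, pointing the
 * code/data DMA bases at the firmware's resident code/data sections within
 * its image in WPR.
 */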
static void
gm200_gr_acr_bld_write(struct nvkm_acr *acr, u32 bld,
		       struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = base + lsfw->app_resident_code_offset;
	const u64 data = base + lsfw->app_resident_data_offset;
	const struct flcn_bl_dmem_desc_v1 hdr = {
		.ctx_dma = FALCON_DMAIDX_UCODE,
		.code_dma_base = code,
		.non_sec_code_off = lsfw->app_resident_code_offset,
		.non_sec_code_size = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = data,
		.data_size = lsfw->app_resident_data_size,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

const struct nvkm_acr_lsf_func
gm200_gr_gpccs_acr = {
	.flags = NVKM_ACR_LSF_FORCE_PRIV_LOAD,
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
	.bld_write = gm200_gr_acr_bld_write,
	.bld_patch = gm200_gr_acr_bld_patch,
};

const struct nvkm_acr_lsf_func
gm200_gr_fecs_acr = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v1),
	.bld_write = gm200_gr_acr_bld_write,
	.bld_patch = gm200_gr_acr_bld_patch,
};

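/* ROP count; read from 0x12006c, the same register init_rop_active_fbps()
 * below treats as the active FBP count.
 */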
int
gm200_gr_rops(struct gf100_gr *gr)
{
	return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}

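/* Second DS HWW error-status unit: the values below follow the hardware
 * init sequence; exact bit semantics are undocumented.
 */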
void
gm200_gr_init_ds_hww_esr_2(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x405848, 0xc0000000);
	nvkm_mask(device, 0x40584c, 0x00000001, 0x00000001);
}

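/* Propagate the LTC configuration already published by PFB (0x100800,
 * 0x100804) into the GPC broadcast copies.
 */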
void
gm200_gr_init_num_active_ltcs(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
}

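/* Mirror the VRAM/MMU setup PFB has already programmed (0x100cxx) into
 * the GPC MMU registers (0x4188xx).
 */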
void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
	nvkm_wr32(device, 0x418890, 0x00000000);
	nvkm_wr32(device, 0x418894, 0x00000000);

	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
}

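/* Tell the ZROP/CROP units how many FBPs are active, as reported by
 * 0x12006c.
 */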
static void
gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}

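/* Hard-coded tile maps for the configurations gm200_gr_oneinit_tiles()
 * special-cases below (2 GPCs/8 TPCs, 4 GPCs/16 TPCs, 6 GPCs/24 TPCs).
 */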
static u8
gm200_gr_tile_map_6_24[] = {
	0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5, 0, 1, 2,
};

static u8
gm200_gr_tile_map_4_16[] = {
	0, 1, 2, 3, 2, 3, 0, 1, 3, 0, 1, 2, 1, 2, 3, 0,
};

static u8
gm200_gr_tile_map_2_8[] = {
	0, 1, 1, 0, 0, 1, 1, 0,
};

int
gm200_gr_oneinit_sm_id(struct gf100_gr *gr)
{
	/*XXX: There's a different algorithm here I've not yet figured out. */
	return gf100_gr_oneinit_sm_id(gr);
}

void
gm200_gr_oneinit_tiles(struct gf100_gr *gr)
{
	/*XXX: Not sure what this is about.  The algorithm from NVGPU
	 *     seems to work for all boards I tried from earlier (and
	 *     later) GPUs except in these specific configurations.
	 *
	 *     Let's just hardcode them for now.
	 */
	if (gr->gpc_nr == 2 && gr->tpc_total == 8) {
		memcpy(gr->tile, gm200_gr_tile_map_2_8, gr->tpc_total);
		gr->screen_tile_row_offset = 1;
	} else
	if (gr->gpc_nr == 4 && gr->tpc_total == 16) {
		memcpy(gr->tile, gm200_gr_tile_map_4_16, gr->tpc_total);
		gr->screen_tile_row_offset = 4;
	} else
	if (gr->gpc_nr == 6 && gr->tpc_total == 24) {
		memcpy(gr->tile, gm200_gr_tile_map_6_24, gr->tpc_total);
		gr->screen_tile_row_offset = 5;
	} else {
		gf100_gr_oneinit_tiles(gr);
	}
}

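/* Hardware description and init/exception callbacks for the GM20x family;
 * most are inherited from earlier generations.
 */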
static const struct gf100_gr_func
gm200_gr = {
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gm200_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_bios = gm107_gr_init_bios,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = gf117_gr_init_zcull,
	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
	.init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
	.init_fecs_exceptions = gf100_gr_init_fecs_exceptions,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_419cc0 = gf100_gr_init_419cc0,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_tex_hww_esr = gf100_gr_init_tex_hww_esr,
	.init_504430 = gm107_gr_init_504430,
	.init_shader_exceptions = gm107_gr_init_shader_exceptions,
	.init_rop_exceptions = gf100_gr_init_rop_exceptions,
	.init_exception2 = gf100_gr_init_exception2,
	.init_400054 = gm107_gr_init_400054,
	.trap_mp = gf100_gr_trap_mp,
	.fecs.reset = gf100_gr_fecs_reset,
	.rops = gm200_gr_rops,
	.tpc_nr = 4,
	.ppc_nr = 2,
	.grctx = &gm200_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, MAXWELL_B, &gf100_fermi },
		{ -1, -1, MAXWELL_COMPUTE_B },
		{}
	}
};

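/* Load the signed FECS/GPCCS LS firmware (bootloader, instruction, data
 * and signature images) for ACR, then the context-switch software bundles
 * shared with GK20A.
 */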
int
gm200_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
	int ret;

	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
						  &gr->fecs.falcon,
						  NVKM_ACR_LSF_FECS,
						  "gr/fecs_", ver, fwif->fecs);
	if (ret)
		return ret;

	ret = nvkm_acr_lsfw_load_bl_inst_data_sig(&gr->base.engine.subdev,
						  &gr->gpccs.falcon,
						  NVKM_ACR_LSF_GPCCS,
						  "gr/gpccs_", ver,
						  fwif->gpccs);
	if (ret)
		return ret;

	gr->firmware = true;

	return gk20a_gr_load_sw(gr, "gr/", ver);
}

MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");

MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");

MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");

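/* Firmware interface table: entry 0 requires external (signed) firmware,
 * the -1 entry is the fallback used when none is available.
 */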
static const struct gf100_gr_fwif
gm200_gr_fwif[] = {
	{  0, gm200_gr_load, &gm200_gr, &gm200_gr_fecs_acr, &gm200_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};

int
gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr);
}