/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <core/client.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/ifc00d.h>
#include <nvif/unpack.h>

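/* Tear down the DMA mappings behind PFN-mapped PTEs.  A non-zero aperture
 * field (bits 2:1) marks a system-memory page that gp100_vmm_pgt_pfn()
 * mapped with dma_map_page(), so the DMA address (stored from bit 8, in
 * 4KiB units) is unmapped again here.
 */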
static void
gp100_vmm_pfn_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
		u64 data   = (u64)datahi << 32 | datalo;
		if ((data & (3ULL << 1)) != 0) {
			addr = (data >> 8) << 12;
			dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		ptei++;
	}
	nvkm_done(pt->memory);
}

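/* Clear the VALID bit on any present system-memory PTE, returning true
 * if anything was cleared.  The DMA mappings themselves are kept; the
 * caller is expected to flush the TLB before gp100_vmm_pfn_unmap()
 * releases them.
 */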
static bool
gp100_vmm_pfn_clear(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	bool dma = false;
	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);
		u64 data   = (u64)datahi << 32 | datalo;
		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
			VMM_WO064(pt, vmm, ptei * 8, data & ~BIT_ULL(0));
			dma = true;
		}
		ptei++;
	}
	nvkm_done(pt->memory);
	return dma;
}

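/* Write 4KiB PTEs from an array of NVKM_VMM_PFN_*-encoded entries.
 * System-memory pages are DMA-mapped here; if the mapping fails, the
 * PTE is left invalid rather than pointing at a bad address.
 */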
static void
gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	for (; ptes; ptes--, map->pfn++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

		if (!(*map->pfn & NVKM_VMM_PFN_A))
			data |= BIT_ULL(7); /* Atomic disable. */

		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
			addr = dma_map_page(dev, pfn_to_page(addr), 0,
					    PAGE_SIZE, DMA_BIDIRECTIONAL);
			if (!WARN_ON(dma_mapping_error(dev, addr))) {
				data |= addr >> 4;
				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
				data |= BIT_ULL(3); /* VOL. */
				data |= BIT_ULL(0); /* VALID. */
			}
		} else {
			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
			data |= BIT_ULL(0); /* VALID. */
		}

		VMM_WO064(pt, vmm, ptei++ * 8, data);
	}
	nvkm_done(pt->memory);
}

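/* Write a run of PTEs from a template: map->type holds the common
 * attribute/comptag bits and map->next the per-PTE increment (page
 * size, plus the comptag stride when compression is enabled).
 */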
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 4) | map->type;

	map->type += ptes * map->ctag;

	while (ptes--) {
		VMM_WO064(pt, vmm, ptei++ * 8, data);
		data += map->next;
	}
}

static void
gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = (*map->dma++ >> 4) | map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
}

static void
gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_spt = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.mem = gp100_vmm_pgt_mem,
	.dma = gp100_vmm_pgt_dma,
	.sgl = gp100_vmm_pgt_sgl,
	.pfn = gp100_vmm_pgt_pfn,
	.pfn_clear = gp100_vmm_pfn_clear,
	.pfn_unmap = gp100_vmm_pfn_unmap,
};

static void
gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
		      struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	/* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_lpt = {
	.invalid = gp100_vmm_lpt_invalid,
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.mem = gp100_vmm_pgt_mem,
};

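/* PD0 entries are 16 bytes wide.  When PD0 is used directly as a 2MiB
 * page table, the large-page PTE is written into the low 64 bits and
 * the upper half is zeroed.
 */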
static inline void
gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 4) | map->type;

	map->type += ptes * map->ctag;

	while (ptes--) {
		VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
		data += map->next;
	}
}

static void
gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
}

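/* Encode a page-table pointer into a PDE word: aperture in bits 2:1
 * (1 = VRAM, 2 = coherent sysmem + VOL, 3 = non-coherent sysmem) and
 * the table address as pt->addr >> 4 (tables are at least 256-byte
 * aligned, so this doesn't collide with the low flag bits).
 */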
static inline bool
gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
{
	switch (nvkm_memory_target(pt->memory)) {
	case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
	case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
		*data |= BIT_ULL(3); /* VOL. */
		break;
	case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
	default:
		WARN_ON(1);
		return false;
	}
	*data |= pt->addr >> 4;
	return true;
}

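/* Update one 128-bit PD0 entry: pgt->pt[0] (the large-page table) goes
 * in the low word and pgt->pt[1] (the small-page table) in the high
 * word; either table may be absent.
 */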
static void
gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	u64 data[2] = {};

	if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
		return;
	if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
		return;

	nvkm_kmap(pd->memory);
	VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
	nvkm_done(pd->memory);
}

static void
gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
	VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
}

static void
gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
{
	VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
}

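/* 2MiB variants of the PFN unmap/clear/map paths above, operating on
 * the low 64 bits of PD0's 16-byte entries and using 2MiB (1 << 21)
 * DMA mappings instead of single pages.
 */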
static void
gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
		u64 data   = (u64)datahi << 32 | datalo;

		if ((data & (3ULL << 1)) != 0) {
			addr = (data >> 8) << 12;
			dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
		}
		ptei++;
	}
	nvkm_done(pt->memory);
}

static bool
gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	bool dma = false;

	nvkm_kmap(pt->memory);
	while (ptes--) {
		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
		u64 data   = (u64)datahi << 32 | datalo;

		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
			VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
			dma = true;
		}
		ptei++;
	}
	nvkm_done(pt->memory);
	return dma;
}

static void
gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	struct device *dev = vmm->mmu->subdev.device->dev;
	dma_addr_t addr;

	nvkm_kmap(pt->memory);
	for (; ptes; ptes--, map->pfn++) {
		u64 data = 0;

		if (!(*map->pfn & NVKM_VMM_PFN_V))
			continue;

		if (!(*map->pfn & NVKM_VMM_PFN_W))
			data |= BIT_ULL(6); /* RO. */

		if (!(*map->pfn & NVKM_VMM_PFN_A))
			data |= BIT_ULL(7); /* Atomic disable. */

		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
			addr = dma_map_page(dev, pfn_to_page(addr), 0,
					    1UL << 21, DMA_BIDIRECTIONAL);
			if (!WARN_ON(dma_mapping_error(dev, addr))) {
				data |= addr >> 4;
				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
				data |= BIT_ULL(3); /* VOL. */
				data |= BIT_ULL(0); /* VALID. */
			}
		} else {
			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
			data |= BIT_ULL(0); /* VALID. */
		}

		VMM_WO064(pt, vmm, ptei++ * 16, data);
	}
	nvkm_done(pt->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd0 = {
	.unmap = gp100_vmm_pd0_unmap,
	.sparse = gp100_vmm_pd0_sparse,
	.pde = gp100_vmm_pd0_pde,
	.mem = gp100_vmm_pd0_mem,
	.pfn = gp100_vmm_pd0_pfn,
	.pfn_clear = gp100_vmm_pd0_pfn_clear,
	.pfn_unmap = gp100_vmm_pd0_pfn_unmap,
};

static void
gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	u64 data = 0;

	if (!gp100_vmm_pde(pgt->pt[0], &data))
		return;

	nvkm_kmap(pd->memory);
	VMM_WO064(pd, vmm, pdei * 8, data);
	nvkm_done(pd->memory);
}

static const struct nvkm_vmm_desc_func
gp100_vmm_desc_pd1 = {
	.unmap = gf100_vmm_pgt_unmap,
	.sparse = gp100_vmm_pgt_sparse,
	.pde = gp100_vmm_pd1_pde,
};

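/* Five-level page-table layouts, listed from leaf to root: the 64KiB
 * large-page tree (desc_16) and the 4KiB small-page tree (desc_12).
 * Each entry gives {type, index bits, entry size, table alignment}.
 */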
const struct nvkm_vmm_desc
gp100_vmm_desc_16[] = {
	{ LPT, 5,  8, 0x0100, &gp100_vmm_desc_lpt },
	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{}
};

const struct nvkm_vmm_desc
gp100_vmm_desc_12[] = {
	{ SPT, 9,  8, 0x1000, &gp100_vmm_desc_spt },
	{ PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 9,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{ PGD, 2,  8, 0x1000, &gp100_vmm_desc_pd1 },
	{}
};

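/* Validate map arguments and build the PTE template: map->type collects
 * VALID/aperture/VOL/PRIV/RO plus the kind in bits 63:56, and map->next
 * is the per-PTE address step.  Compressible kinds additionally have
 * comptags allocated here, falling back to the uncompressed kind when
 * tags are unavailable.
 */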
int
gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
		struct nvkm_vmm_map *map)
{
	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
	const struct nvkm_vmm_page *page = map->page;
	union {
		struct gp100_vmm_map_vn vn;
		struct gp100_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_memory *memory = map->memory;
	u8  kind, kind_inv, priv, ro, vol;
	int kindn, aper, ret = -ENOSYS;
	const u8 *kindm;

	map->next = (1ULL << page->shift) >> 4;
	map->type = 0;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		vol  = !!args->v0.vol;
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind =   args->v0.kind;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		vol  = target == NVKM_MEM_TARGET_HOST;
		ro   = 0;
		priv = 0;
		kind = 0x00;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	aper = vmm->func->aper(target);
	if (WARN_ON(aper < 0))
		return aper;

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (kindm[kind] != kind) {
		u64 tags = nvkm_memory_size(memory) >> 16;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		if (!map->no_comp) {
			ret = nvkm_memory_tags_get(memory, device, tags,
						   nvkm_ltc_tags_clear,
						   &map->tags);
			if (ret) {
				VMM_DEBUG(vmm, "comp %d", ret);
				return ret;
			}
		}

		if (!map->no_comp && map->tags->mn) {
			tags = map->tags->mn->offset + (map->offset >> 16);
			map->ctag |= ((1ULL << page->shift) >> 16) << 36;
			map->type |= tags << 36;
			map->next |= map->ctag;
		} else {
			kind = kindm[kind];
		}
	}

	map->type |= BIT(0);
	map->type |= (u64)aper << 1;
	map->type |= (u64) vol << 3;
	map->type |= (u64)priv << 5;
	map->type |= (u64)  ro << 6;
	map->type |= (u64)kind << 56;
	return 0;
}

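/* Cancel a targeted replayable fault: pause GR context switching, check
 * that the faulting instance is still current, and issue a
 * CANCEL_TARGETED invalidate aimed at the given hub/gpc/client.
 */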
static int
gp100_vmm_fault_cancel(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	union {
		struct gp100_vmm_fault_cancel_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	u32 aper;

	if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false)))
		return ret;

	/* Translate MaxwellFaultBufferA instance pointer to the same
	 * format as the NV_GR_FECS_CURRENT_CTX register.
	 */
	aper = (args->v0.inst >> 8) & 3;
	args->v0.inst >>= 12;
	args->v0.inst |= aper << 28;
	args->v0.inst |= 0x80000000;

	if (!WARN_ON(nvkm_gr_ctxsw_pause(device))) {
		if (nvkm_gr_ctxsw_inst(device) == args->v0.inst) {
			gf100_vmm_invalidate(vmm, 0x0000001b
					     /* CANCEL_TARGETED. */ |
					     (args->v0.hub    << 20) |
					     (args->v0.gpc    << 15) |
					     (args->v0.client << 9));
		}
		WARN_ON(nvkm_gr_ctxsw_resume(device));
	}

	return 0;
}

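/* Ask the MMU to retry all outstanding replayable faults. */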
static int
gp100_vmm_fault_replay(struct nvkm_vmm *vmm, void *argv, u32 argc)
{
	union {
		struct gp100_vmm_fault_replay_vn vn;
	} *args = argv;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		gf100_vmm_invalidate(vmm, 0x0000000b); /* REPLAY_GLOBAL. */
	}

	return ret;
}

int
gp100_vmm_mthd(struct nvkm_vmm *vmm,
	       struct nvkm_client *client, u32 mthd, void *argv, u32 argc)
{
	switch (mthd) {
	case GP100_VMM_VN_FAULT_REPLAY:
		return gp100_vmm_fault_replay(vmm, argv, argc);
	case GP100_VMM_VN_FAULT_CANCEL:
		return gp100_vmm_fault_cancel(vmm, argv, argc);
	default:
		break;
	}
	return -EINVAL;
}

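/* Write the page directory base used by TLB invalidates: low 32 bits
 * to 0x100cb8, high bits to 0x100cec.
 */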
void
gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100cb8, lower_32_bits(addr));
	nvkm_wr32(device, 0x100cec, upper_32_bits(addr));
}

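/* Invalidate this VMM's TLB entries.  The depth hint is not used;
 * PAGE_ALL is always set, and HUB_ONLY while BAR mappings hold a
 * reference on the VMM.
 */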
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
	u32 type = 0;
	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */
	type |= 0x00000001; /* PAGE_ALL */
	gf100_vmm_invalidate(vmm, type);
}

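/* Fill the instance block's PD pointer: VER2 page tables with 64KiB
 * big pages, plus the TEX/GCC fault-replay modes when the VMM was
 * created with fault_replay set.
 */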
int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
	if (vmm->replay) {
		base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
		base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
	}
	return gf100_vmm_join_(vmm, inst, base);
}

static const struct nvkm_vmm_func
gp100_vmm = {
	.join = gp100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gp100_vmm_valid,
	.flush = gp100_vmm_flush,
	.mthd = gp100_vmm_mthd,
	.invalidate_pdb = gp100_vmm_invalidate_pdb,
	.page = {
		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
		{}
	}
};

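/* Common constructor for GP100-style VMMs: parse the optional
 * GP100_VMM_V0 argument for the fault_replay flag before creating
 * the VMM itself.
 */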
int
gp100_vmm_new_(const struct nvkm_vmm_func *func,
	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	       void *argv, u32 argc, struct lock_class_key *key,
	       const char *name, struct nvkm_vmm **pvmm)
{
	union {
		struct gp100_vmm_vn vn;
		struct gp100_vmm_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;
	bool replay;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		replay = args->v0.fault_replay != 0;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		replay = false;
	} else
		return ret;

	ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
	if (ret)
		return ret;

	(*pvmm)->replay = replay;
	return 0;
}

int
gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
			      argv, argc, key, name, pvmm);
}