/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;
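/* Resolve a client object handle to the VMM it wraps, taking a reference
 * that the caller must release with nvkm_vmm_unref().  Returns an ERR_PTR
 * if the handle doesn't name a VMM object belonging to this client.
 */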
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
	struct nvkm_object *object;

	object = nvkm_object_search(client, handle, &nvkm_uvmm);
	if (IS_ERR(object))
		return (void *)object;

	return nvkm_vmm_ref(nvkm_uvmm(object)->vmm);
}

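/* NVIF_VMM_V0_PFNCLR: tear down PFN-based mappings (e.g. from the SVM/HMM
 * path) over a range.  Refused inside the client-managed window of a raw
 * VMM, where the client controls the page tables directly.
 */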
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_pfnclr_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	int ret = -ENOSYS;
	u64 addr, size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
		return -EINVAL;

	if (size) {
		mutex_lock(&vmm->mutex.vmm);
		ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
		mutex_unlock(&vmm->mutex.vmm);
	}

	return ret;
}

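/* NVIF_VMM_V0_PFNMAP: map an array of raw PFNs at the given page shift.
 * The trailing argument buffer must carry exactly one PFN entry per page
 * covered by the range, which is what the argc check below enforces.
 */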
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_pfnmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	int ret = -ENOSYS;
	u64 addr, size, *phys;
	u8  page;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		page = args->v0.page;
		addr = args->v0.addr;
		size = args->v0.size;
		phys = args->v0.phys;
		if (argc != (size >> page) * sizeof(args->v0.phys[0]))
			return -EINVAL;
	} else
		return ret;

	if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
		return -EINVAL;

	if (size) {
		mutex_lock(&vmm->mutex.vmm);
		ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
		mutex_unlock(&vmm->mutex.vmm);
	}

	return ret;
}

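/* NVIF_VMM_V0_UNMAP: unmap a VMA previously mapped with NVIF_VMM_V0_MAP.
 * The address must match the start of a node that isn't busy and still
 * has memory mapped into it.
 */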
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_unmap_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
		return -EINVAL;

	mutex_lock(&vmm->mutex.vmm);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx",
			  addr, vma ? vma->addr : ~0ULL);
		goto done;
	}

	if (ret = -ENOENT, vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
		goto done;
	}

	if (ret = -EINVAL, !vma->memory) {
		VMM_DEBUG(vmm, "unmapped");
		goto done;
	}

	nvkm_vmm_unmap_locked(vmm, vma, false);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex.vmm);
	return ret;
}

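/* NVIF_VMM_V0_MAP: map an nvkm_memory object into an allocated VMA,
 * splitting the node first when the request covers only part of it.
 * The node is flagged busy so the VMM lock can be dropped around the
 * backend map call; on failure the region is unwound again.
 */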
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	struct nvkm_client *client = uvmm->object.client;
	union {
		struct nvif_vmm_map_v0 v0;
	} *args = argv;
	u64 addr, size, handle, offset;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	struct nvkm_memory *memory;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
		addr = args->v0.addr;
		size = args->v0.size;
		handle = args->v0.memory;
		offset = args->v0.offset;
	} else
		return ret;

	if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
		return -EINVAL;

	memory = nvkm_umem_search(client, handle);
	if (IS_ERR(memory)) {
		VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
		return PTR_ERR(memory);
	}

	mutex_lock(&vmm->mutex.vmm);
	if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
		VMM_DEBUG(vmm, "lookup %016llx", addr);
		goto fail;
	}

	if (ret = -ENOENT, vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
		goto fail;
	}

	if (ret = -EINVAL, vma->mapped && !vma->memory) {
		VMM_DEBUG(vmm, "pfnmap %016llx", addr);
		goto fail;
	}

	if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
		if (addr + size > vma->addr + vma->size || vma->memory ||
		    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
			VMM_DEBUG(vmm, "split %d %d %d "
				       "%016llx %016llx %016llx %016llx",
				  !!vma->memory, vma->refd, vma->mapref,
				  addr, size, vma->addr, (u64)vma->size);
			goto fail;
		}

		vma = nvkm_vmm_node_split(vmm, vma, addr, size);
		if (!vma) {
			ret = -ENOMEM;
			goto fail;
		}
	}
	vma->busy = true;
	mutex_unlock(&vmm->mutex.vmm);

	ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
	if (ret == 0) {
		/* Successful map will clear vma->busy. */
		nvkm_memory_unref(&memory);
		return 0;
	}

	mutex_lock(&vmm->mutex.vmm);
	vma->busy = false;
	nvkm_vmm_unmap_region(vmm, vma);
fail:
	mutex_unlock(&vmm->mutex.vmm);
	nvkm_memory_unref(&memory);
	return ret;
}

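/* NVIF_VMM_V0_PUT: release address space allocated with NVIF_VMM_V0_GET.
 * Only a whole, non-busy node may be freed; fragments created by a
 * partial map (vma->part) are rejected.
 */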
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_put_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	u64 addr;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		addr = args->v0.addr;
	} else
		return ret;

	mutex_lock(&vmm->mutex.vmm);
	vma = nvkm_vmm_node_search(vmm, addr);
	if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
		VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
			  vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
		goto done;
	}

	if (ret = -ENOENT, vma->busy) {
		VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
		goto done;
	}

	nvkm_vmm_put_locked(vmm, vma);
	ret = 0;
done:
	mutex_unlock(&vmm->mutex.vmm);
	return ret;
}

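/* NVIF_VMM_V0_GET: allocate address space, optionally pre-referencing
 * page tables (the PTES type) or requesting sparse PTEs, and return the
 * chosen base address to the client.
 */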
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_get_v0 v0;
	} *args = argv;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma *vma;
	int ret = -ENOSYS;
	bool getref, mapref, sparse;
	u8 page, align;
	u64 size;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
		mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
		sparse = args->v0.sparse;
		page = args->v0.page;
		align = args->v0.align;
		size = args->v0.size;
	} else
		return ret;

	mutex_lock(&vmm->mutex.vmm);
	ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
				  page, align, size, &vma);
	mutex_unlock(&vmm->mutex.vmm);
	if (ret)
		return ret;

	args->v0.addr = vma->addr;
	return ret;
}

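/* NVIF_VMM_V0_PAGE: describe one of the VMM's supported page sizes: its
 * shift, and whether it supports sparse, VRAM, host and compressed
 * mappings.
 */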
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_page_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	int ret = -ENOSYS;
	u8 type, index, nr;

	page = uvmm->vmm->func->page;
	for (nr = 0; page[nr].shift; nr++);

	if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		if ((index = args->v0.index) >= nr)
			return -EINVAL;
		type = page[index].type;
		args->v0.shift = page[index].shift;
		args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
		args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
		args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
		args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
	} else
		return -ENOSYS;

	return 0;
}

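/* Translate a page shift into an index into the VMM's page array for the
 * raw methods, verifying that the size is aligned to that page size.
 */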
static inline int
nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
{
	struct nvkm_vmm *vmm = uvmm->vmm;
	const struct nvkm_vmm_page *page;

	if (likely(shift)) {
		for (page = vmm->func->page; page->shift; page++) {
			if (shift == page->shift)
				break;
		}

		if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
			VMM_DEBUG(vmm, "page %d %016llx", shift, size);
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}
	*refd = page - vmm->func->page;

	return 0;
}

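/* NVIF_VMM_RAW_V0_GET: reference page tables for a range inside the
 * client-managed region, at the requested page size.
 */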
static int
nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
	struct nvkm_vmm *vmm = uvmm->vmm;
	u8 refd;
	int ret;

	if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
		return -EINVAL;

	ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
	if (ret)
		return ret;

	return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
}

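/* NVIF_VMM_RAW_V0_PUT: drop the page-table references taken by GET. */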
static int
nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
	struct nvkm_vmm *vmm = uvmm->vmm;
	u8 refd;
	int ret;

	if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
		return -EINVAL;

	ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
	if (ret)
		return ret;

	nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);

	return 0;
}

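/* NVIF_VMM_RAW_V0_MAP: map memory at an explicit address and page size.
 * A throwaway nvkm_vma on the stack carries the map parameters; no node
 * is inserted into the VMM's tracking tree, so the client is responsible
 * for remembering raw mappings.
 */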
static int
nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
	struct nvkm_client *client = uvmm->object.client;
	struct nvkm_vmm *vmm = uvmm->vmm;
	struct nvkm_vma vma = {
		.addr = args->addr,
		.size = args->size,
		.used = true,
		.mapref = false,
		.no_comp = true,
	};
	struct nvkm_memory *memory;
	void *argv = (void *)(uintptr_t)args->argv;
	unsigned int argc = args->argc;
	u64 handle = args->memory;
	u8 refd;
	int ret;

	if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
		return -EINVAL;

	ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
	if (ret)
		return ret;

	vma.page = vma.refd = refd;

	memory = nvkm_umem_search(client, handle);
	if (IS_ERR(memory)) {
		VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
		return PTR_ERR(memory);
	}

	ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);

	nvkm_memory_unref(&vma.memory);
	nvkm_memory_unref(&memory);
	return ret;
}

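/* NVIF_VMM_RAW_V0_UNMAP: unmap a raw mapping, passing the caller's sparse
 * flag through to the backend.
 */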
static int
nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
	struct nvkm_vmm *vmm = uvmm->vmm;
	u8 refd;
	int ret;

	if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
		return -EINVAL;

	ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
	if (ret)
		return ret;

	nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
			   args->sparse, refd);

	return 0;
}

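/* NVIF_VMM_RAW_V0_SPARSE: set or clear sparse state on a range inside the
 * client-managed region.
 */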
static int
nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
	struct nvkm_vmm *vmm = uvmm->vmm;

	if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
		return -EINVAL;

	return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
}

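/* NVIF_VMM_V0_RAW: dispatch the raw sub-operations above.  Only valid on
 * a VMM created with NVIF_VMM_V0_TYPE_RAW.
 */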
static int
nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
	union {
		struct nvif_vmm_raw_v0 v0;
	} *args = argv;
	int ret = -ENOSYS;

	if (!uvmm->vmm->managed.raw)
		return -EINVAL;

	if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
		return ret;

	switch (args->v0.op) {
	case NVIF_VMM_RAW_V0_GET:
		return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
	case NVIF_VMM_RAW_V0_PUT:
		return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
	case NVIF_VMM_RAW_V0_MAP:
		return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
	case NVIF_VMM_RAW_V0_UNMAP:
		return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
	case NVIF_VMM_RAW_V0_SPARSE:
		return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
	default:
		return -EINVAL;
	}
}

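/* Top-level NVIF method dispatch.  Methods in the MTHD(0x00)..MTHD(0x7f)
 * range are forwarded to the backend implementation, when it provides one.
 */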
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	switch (mthd) {
	case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
	case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
	case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
	case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
	case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
	case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
	case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
	case NVIF_VMM_V0_RAW   : return nvkm_uvmm_mthd_raw   (uvmm, argv, argc);
	case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
		if (uvmm->vmm->func->mthd) {
			return uvmm->vmm->func->mthd(uvmm->vmm,
						     uvmm->object.client,
						     mthd, argv, argc);
		}
		break;
	default:
		break;
	}
	return -EINVAL;
}

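/* Object destructor: drops the userspace reference on the VMM. */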
static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
	struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
	nvkm_vmm_unref(&uvmm->vmm);
	return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
	.dtor = nvkm_uvmm_dtor,
	.mthd = nvkm_uvmm_mthd,
};

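/* Create the user VMM object.  Either constructs a fresh per-client VMM
 * (passing through the MANAGED/RAW type), or takes a reference on the
 * MMU's shared VMM when one already exists, then reports the supported
 * page count and address range back to the client.
 */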
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
	      struct nvkm_object **pobject)
{
	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
	const bool more = oclass->base.maxver >= 0;
	union {
		struct nvif_vmm_v0 v0;
	} *args = argv;
	const struct nvkm_vmm_page *page;
	struct nvkm_uvmm *uvmm;
	int ret = -ENOSYS;
	u64 addr, size;
	bool managed, raw;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
		managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
		raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
		addr = args->v0.addr;
		size = args->v0.size;
	} else
		return ret;

	if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
	*pobject = &uvmm->object;

	if (!mmu->vmm) {
		ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
					  argv, argc, NULL, "user", &uvmm->vmm);
		if (ret)
			return ret;

		uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
	} else {
		if (size)
			return -EINVAL;

		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
	}
	uvmm->vmm->managed.raw = raw;

	if (mmu->func->promote_vmm) {
		ret = mmu->func->promote_vmm(uvmm->vmm);
		if (ret)
			return ret;
	}

	page = uvmm->vmm->func->page;
	args->v0.page_nr = 0;
	while (page && (page++)->shift)
		args->v0.page_nr++;
	args->v0.addr = uvmm->vmm->start;
	args->v0.size = uvmm->vmm->limit;
	return 0;
}