// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_vm.h"

#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_heap_config.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/stddef.h>

/**
 * DOC: Memory context
 *
 * This is the "top level" datatype in the VM code. It's exposed in the public
 * API as an opaque handle.
 */

/**
 * struct pvr_vm_context - Context type used to represent a single VM.
 */
struct pvr_vm_context {
	/**
	 * @pvr_dev: The PowerVR device to which this context is bound.
	 * This binding is immutable for the life of the context.
	 */
	struct pvr_device *pvr_dev;

	/** @mmu_ctx: The context for binding to physical memory. */
	struct pvr_mmu_context *mmu_ctx;

	/** @gpuvm_mgr: GPUVM object associated with this context. */
	struct drm_gpuvm gpuvm_mgr;

	/** @lock: Global lock on this VM. */
	struct mutex lock;

	/**
	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
	 * context.
	 */
	struct pvr_fw_object *fw_mem_ctx_obj;

	/** @ref_count: Reference count of object. */
	struct kref ref_count;

	/**
	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
	 * should use the @dummy_gem.resv and not their own _resv field.
	 */
	struct drm_gem_object dummy_gem;
};

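/* Retrieve the VM context wrapping a given &struct drm_gpuvm. */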
static inline
struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
}

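/**
 * pvr_vm_context_get() - Take an additional reference on a VM context.
 * @vm_ctx: Target VM context, or %NULL.
 *
 * Returns @vm_ctx. Release the reference with pvr_vm_context_put().
 */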
struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	return vm_ctx;
}

/**
 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
 *                                     page table structure behind a VM context.
 * @vm_ctx: Target VM context.
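 *
 * Return: The DMA address of the root page table of @vm_ctx.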
 */
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
{
	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
}

/**
 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
 * @vm_ctx: Target VM context.
 *
 * This is used to allow private BOs to share a dma_resv for faster fence
 * updates.
 *
 * Returns: The dma_resv pointer.
 */
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->dummy_gem.resv;
}

/**
 * DOC: Memory mappings
 */

/**
 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 */
struct pvr_vm_gpuva {
	/** @base: The wrapped drm_gpuva object. */
	struct drm_gpuva base;
};

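/**
 * enum pvr_vm_bind_type - Type of a VM bind operation.
 * @PVR_VM_BIND_TYPE_MAP: Create a new mapping.
 * @PVR_VM_BIND_TYPE_UNMAP: Remove an existing mapping.
 */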
enum pvr_vm_bind_type {
	PVR_VM_BIND_TYPE_MAP,
	PVR_VM_BIND_TYPE_UNMAP,
};

/**
 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 */
struct pvr_vm_bind_op {
	/** @type: Map or unmap. */
	enum pvr_vm_bind_type type;

	/** @pvr_obj: Object associated with mapping (map only). */
	struct pvr_gem_object *pvr_obj;

	/**
	 * @vm_ctx: VM context where the mapping will be created or destroyed.
	 */
	struct pvr_vm_context *vm_ctx;

	/** @mmu_op_ctx: MMU op context. */
	struct pvr_mmu_op_context *mmu_op_ctx;

	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
	struct drm_gpuvm_bo *gpuvm_bo;

	/**
	 * @new_va: Prealloced VA mapping object (init in callback).
	 * Used when creating a mapping.
	 */
	struct pvr_vm_gpuva *new_va;

	/**
	 * @prev_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the beginning into a new mapping.
	 */
	struct pvr_vm_gpuva *prev_va;

	/**
	 * @next_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the end into a new mapping.
	 */
	struct pvr_vm_gpuva *next_va;

	/** @offset: Offset into @pvr_obj to begin mapping from. */
	u64 offset;

	/** @device_addr: Device-virtual address at the start of the mapping. */
	u64 device_addr;

	/** @size: Size of the desired mapping. */
	u64 size;
};

/**
 * pvr_vm_bind_op_exec() - Execute a single bind op.
 * @bind_op: Bind op context.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
 *    a callback function.
 */
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
	switch (bind_op->type) {
	case PVR_VM_BIND_TYPE_MAP:
		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
					bind_op, bind_op->device_addr,
					bind_op->size,
					gem_from_pvr_gem(bind_op->pvr_obj),
					bind_op->offset);

	case PVR_VM_BIND_TYPE_UNMAP:
		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
					  bind_op, bind_op->device_addr,
					  bind_op->size);
	}

	/*
	 * This shouldn't happen unless something went wrong
	 * in drm_sched.
	 */
	WARN_ON(1);
	return -EINVAL;
}

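/**
 * pvr_vm_bind_op_fini() - Release the resources held by a bind op.
 * @bind_op: Target bind op.
 *
 * Safe to call on a partially initialised bind op; only the members that have
 * been set up are released.
 */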
static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
{
	drm_gpuvm_bo_put(bind_op->gpuvm_bo);

	kfree(bind_op->new_va);
	kfree(bind_op->prev_va);
	kfree(bind_op->next_va);

	if (bind_op->pvr_obj)
		pvr_gem_object_put(bind_op->pvr_obj);

	if (bind_op->mmu_op_ctx)
		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
}

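/**
 * pvr_vm_bind_op_map_init() - Initialise a bind op for a map operation.
 * @bind_op: Bind op to initialise.
 * @vm_ctx: VM context in which the mapping will be created.
 * @pvr_obj: Object to map.
 * @offset: Offset into @pvr_obj to begin mapping from.
 * @device_addr: Device-virtual address at the start of the mapping.
 * @size: Size of the desired mapping.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL if the requested range is invalid or misaligned,
 *  * -%ENOMEM on allocation failure, or
 *  * Any error returned while obtaining the gpuvm_bo, the sg_table or the
 *    MMU op context.
 */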
static int
pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
			struct pvr_vm_context *vm_ctx,
			struct pvr_gem_object *pvr_obj, u64 offset,
			u64 device_addr, u64 size)
{
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
	struct sg_table *sgt;
	u64 offset_plus_size;
	int err;

	if (check_add_overflow(offset, size, &offset_plus_size))
		return -EINVAL;

	if (is_user &&
	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
		return -EINVAL;
	}

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_MAP;

	dma_resv_lock(obj->resv, NULL);
	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
	dma_resv_unlock(obj->resv);
	if (IS_ERR(bind_op->gpuvm_bo))
		return PTR_ERR(bind_op->gpuvm_bo);

	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	/* Pin pages so they're ready for use. */
	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
	err = PTR_ERR_OR_ZERO(sgt);
	if (err)
		goto err_bind_op_fini;

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->pvr_obj = pvr_obj;
	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;
	bind_op->offset = offset;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

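/**
 * pvr_vm_bind_op_unmap_init() - Initialise a bind op for an unmap operation.
 * @bind_op: Bind op to initialise.
 * @vm_ctx: VM context containing the mapping to remove.
 * @device_addr: Device-virtual address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL if the requested range is invalid,
 *  * -%ENOMEM on allocation failure, or
 *  * Any error returned by pvr_mmu_op_context_create().
 */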
static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
			  struct pvr_vm_context *vm_ctx, u64 device_addr,
			  u64 size)
{
	int err;

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;

	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

/**
 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 * @op: gpuva op containing the map details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_map().
 */
static int
pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
	struct pvr_vm_bind_op *ctx = op_ctx;
	int err;

	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
		return -EINVAL;

	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
			  op->map.va.addr);
	if (err)
		return err;

	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
	ctx->new_va = NULL;

	return 0;
}

/**
 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 * @op: gpuva op containing the unmap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;

	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
				op->unmap.va->va.range);

	if (err)
		return err;

	drm_gpuva_unmap(&op->unmap);
	drm_gpuva_unlink(op->unmap.va);

	return 0;
}

/**
 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 * @op: gpuva op containing the remap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 * mapping or unmapping operation causes a region to be split. The
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;
	u64 va_start = 0, va_range = 0;
	int err;

	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
	if (err)
		return err;

	/* No actual remap required: the page table tree depth is fixed to 3,
	 * and we use 4k page table entries only for now.
	 */
	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);

	if (op->remap.prev) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
		ctx->prev_va = NULL;
	}

	if (op->remap.next) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
		ctx->next_va = NULL;
	}

	drm_gpuva_unlink(op->remap.unmap->va);

	return 0;
}

/*
 * Public API
 *
 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 */

/**
 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 *                              is valid.
 * @device_addr: Virtual device address to test.
 *
 * Return:
 *  * %true if @device_addr is within the valid range for a device page
 *    table and is aligned to the device page size, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_is_valid(u64 device_addr)
{
	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
}

/**
 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 * address and associated size are both valid.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address to test.
 * @size: Size of the range based at @device_addr to test.
 *
 * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
 * on @device_addr + @size) to verify a device-virtual address range initially
 * seems intuitive, but it produces a false-negative when the address range
 * is right at the end of device-virtual address space.
 *
 * This function catches that corner case, as well as checking that
 * @size is non-zero.
 *
 * Return:
 *  * %true if @device_addr is device page aligned; @size is device page
 *    aligned; the range specified by @device_addr and @size is within the
 *    bounds of the device-virtual address space, and @size is non-zero, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
				   u64 device_addr, u64 size)
{
	return pvr_device_addr_is_valid(device_addr) &&
	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
}

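/* Called by GPUVM when the last reference on the &struct drm_gpuvm is dropped. */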
static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
	kfree(to_pvr_vm_context(gpuvm));
}

static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
	.vm_free = pvr_gpuvm_free,
	.sm_step_map = pvr_vm_gpuva_map,
	.sm_step_remap = pvr_vm_gpuva_remap,
	.sm_step_unmap = pvr_vm_gpuva_unmap,
};

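/**
 * fw_mem_context_init() - Initialise a firmware memory context structure.
 * @cpu_ptr: CPU pointer to the firmware memory context being initialised.
 * @priv: Opaque pointer to the VM context backing the memory context.
 *
 * Used as the initialisation callback passed to pvr_fw_object_create() in
 * pvr_vm_create_context().
 */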
static void
fw_mem_context_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
	struct pvr_vm_context *vm_ctx = priv;

	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
}

/**
 * pvr_vm_create_context() - Create a new VM context.
 * @pvr_dev: Target PowerVR device.
 * @is_userspace_context: %true if this context is for userspace. This will
 *                        create a firmware memory context for the VM context
 *                        and disable warnings when tearing down mappings.
 *
 * Return:
 *  * A handle to the newly-minted VM context on success,
 *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 *    missing or has an unsupported value,
 *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 *    or
 *  * Any error encountered while setting up internal structures.
 */
struct pvr_vm_context *
pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);

	struct pvr_vm_context *vm_ctx;
	u16 device_addr_bits;

	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
				&device_addr_bits);
	if (err) {
		drm_err(drm_dev,
			"Failed to get device virtual address space bits\n");
		return ERR_PTR(err);
	}

	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
		drm_err(drm_dev,
			"Device has unsupported virtual address space size\n");
		return ERR_PTR(-EINVAL);
	}

	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
	if (!vm_ctx)
		return ERR_PTR(-ENOMEM);

	vm_ctx->pvr_dev = pvr_dev;

	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
	if (err)
		goto err_free;

	if (is_userspace_context) {
		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);

		if (err)
			goto err_page_table_destroy;
	}

	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);

	mutex_init(&vm_ctx->lock);
	kref_init(&vm_ctx->ref_count);

	return vm_ctx;

err_page_table_destroy:
	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);

err_free:
	kfree(vm_ctx);

	return ERR_PTR(err);
}

/**
 * pvr_vm_context_release() - Teardown a VM context.
 * @ref_count: Pointer to reference counter of the VM context.
 *
 * This function ensures that no mappings are left dangling by unmapping them
 * all in order of ascending device-virtual address.
 */
static void
pvr_vm_context_release(struct kref *ref_count)
{
	struct pvr_vm_context *vm_ctx =
		container_of(ref_count, struct pvr_vm_context, ref_count);

	if (vm_ctx->fw_mem_ctx_obj)
		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);

	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
			     vm_ctx->gpuvm_mgr.mm_range));

	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
	mutex_destroy(&vm_ctx->lock);

	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}

/**
 * pvr_vm_context_lookup() - Look up VM context from handle
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Object handle.
 *
 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 *
 * Returns:
 *  * The requested object on success, or
 *  * %NULL on failure (object does not exist in list, or is not a VM context)
 */
struct pvr_vm_context *
pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_vm_context *vm_ctx;

	xa_lock(&pvr_file->vm_ctx_handles);
	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	xa_unlock(&pvr_file->vm_ctx_handles);

	return vm_ctx;
}

/**
 * pvr_vm_context_put() - Release a reference on a VM context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * %true if the VM context was destroyed, or
 *  * %false if there are any references still remaining.
 */
bool
pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);

	return true;
}

/**
 * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
 * with the given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all VM contexts associated with @pvr_file from the file's VM context
 * handle list and drops the initial references. The VM contexts are then
 * destroyed once all outstanding references are dropped.
 */
void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_vm_context *vm_ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
		/* vm_ctx is not used here because that would create a race with xa_erase */
		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
	}
}

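/**
 * pvr_vm_lock_extra() - Lock the GEM object targeted by a bind op.
 * @vm_exec: drm_gpuvm_exec locking context; the associated
 *           &struct pvr_vm_bind_op is carried in &drm_gpuvm_exec.extra.priv.
 *
 * Used as the &drm_gpuvm_exec.extra.fn callback in pvr_vm_map() and
 * pvr_vm_unmap().
 */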
static int
pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;

	/* Unmap operations don't have an object to lock. */
	if (!pvr_obj)
		return 0;

	/* Acquire lock on the GEM being mapped. */
	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}

/**
 * pvr_vm_map() - Map a section of physical memory into a section of
 * device-virtual memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 * @device_addr: Virtual device address at the start of the requested mapping.
 * @size: Size of the requested mapping.
 *
 * No handle is returned to represent the mapping. Instead, callers should
 * remember @device_addr and use that as a handle.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address; the region specified by @pvr_obj_offset and @size does not fall
 *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 *    is not device-virtual page-aligned,
 *  * Any error encountered while performing internal operations required to
 *    create the mapping (returned from pvr_vm_gpuva_map or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
	   u64 pvr_obj_offset, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
					  pvr_obj_offset, device_addr,
					  size);

	if (err)
		return err;

	pvr_gem_object_get(pvr_obj);

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}

/**
 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address,
 *  * Any error encountered while performing internal operations required to
 *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
					    size);
	if (err)
		return err;

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}

/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 128,
		.size = 1024,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 128,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
		.offset = 0,
		.size = 128,
	},
};

#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)

/*
 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 * static data area for each heap.
 */
static const struct drm_pvr_heap pvr_heaps[] = {
	[DRM_PVR_HEAP_GENERAL] = {
		.base = ROGUE_GENERAL_HEAP_BASE,
		.size = ROGUE_GENERAL_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_USC_CODE] = {
		.base = ROGUE_USCCODE_HEAP_BASE,
		.size = ROGUE_USCCODE_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_RGNHDR] = {
		.base = ROGUE_RGNHDR_HEAP_BASE,
		.size = ROGUE_RGNHDR_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_VIS_TEST] = {
		.base = ROGUE_VISTEST_HEAP_BASE,
		.size = ROGUE_VISTEST_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
};

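/**
 * pvr_static_data_areas_get() - Handle a dev query request for the static
 * data areas.
 * @pvr_dev: Target PowerVR device.
 * @args: Query arguments; @args->pointer may be zero to query the required
 *        size only.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned while copying data to or from userspace.
 */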
int
pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
			  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_static_data_areas query = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.static_data_areas.array) {
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
		goto copy_out;
	}

	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);

	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
	if (err < 0)
		return err;

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

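/**
 * pvr_heap_info_get() - Handle a dev query request for heap information.
 * @pvr_dev: Target PowerVR device.
 * @args: Query arguments; @args->pointer may be zero to query the required
 *        size only.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned while copying data to or from userspace.
 */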
int
pvr_heap_info_get(const struct pvr_device *pvr_dev,
		  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_heap_info query = {0};
	u64 dest;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.heaps.array) {
		query.heaps.count = ARRAY_SIZE(pvr_heaps);
		query.heaps.stride = sizeof(struct drm_pvr_heap);
		goto copy_out;
	}

	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
		query.heaps.count = ARRAY_SIZE(pvr_heaps);

	/* Region header heap is only present if BRN63142 is present. */
	dest = query.heaps.array;
	for (size_t i = 0; i < query.heaps.count; i++) {
		struct drm_pvr_heap heap = pvr_heaps[i];

		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
			heap.size = 0;

		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
		if (err < 0)
			return err;

		dest += query.heaps.stride;
	}

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_heap_contains_range() - Determine if a given heap contains the specified
 *                             device-virtual address range.
 * @pvr_heap: Target heap.
 * @start: Inclusive start of the target range.
 * @end: Inclusive end of the target range.
 *
 * It is an error to call this function with values of @start and @end that do
 * not satisfy the condition @start <= @end.
 */
static __always_inline bool
pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
{
	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
}

/**
 * pvr_find_heap_containing() - Find a heap which contains the specified
 *                              device-virtual address range.
 * @pvr_dev: Target PowerVR device.
 * @start: Start of the target range.
 * @size: Size of the target range.
 *
 * Return:
 *  * A pointer to a constant instance of struct drm_pvr_heap representing the
 *    heap containing the entire range specified by @start and @size on
 *    success, or
 *  * %NULL if no such heap exists.
 */
const struct drm_pvr_heap *
pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
{
	u64 end;

	if (check_add_overflow(start, size - 1, &end))
		return NULL;

	/*
	 * There are no guarantees about the order of address ranges in
	 * &pvr_heaps, so iterate over the entire array for a heap whose
	 * range completely encompasses the given range.
	 */
	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
		/* Filter heaps that present only with an associated quirk */
		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
			continue;
		}

		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
			return &pvr_heaps[heap_id];
	}

	return NULL;
}

/**
 * pvr_vm_find_gem_object() - Look up a buffer object from a given
 *                            device-virtual address.
 * @vm_ctx: [IN] Target VM context.
 * @device_addr: [IN] Virtual device address at the start of the required
 *               object.
 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
 *                     of the mapped region within the buffer object. May be
 *                     %NULL if this information is not required.
 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
 *                   region. May be %NULL if this information is not required.
 *
 * If successful, a reference will be taken on the buffer object. The caller
 * must drop the reference with pvr_gem_object_put().
 *
 * Return:
 *  * The PowerVR buffer object mapped at @device_addr if one exists, or
 *  * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
		       u64 *mapped_offset_out, u64 *mapped_size_out)
{
	struct pvr_gem_object *pvr_obj;
	struct drm_gpuva *va;

	mutex_lock(&vm_ctx->lock);

	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
	if (!va)
		goto err_unlock;

	pvr_obj = gem_to_pvr_gem(va->gem.obj);
	pvr_gem_object_get(pvr_obj);

	if (mapped_offset_out)
		*mapped_offset_out = va->gem.offset;
	if (mapped_size_out)
		*mapped_size_out = va->va.range;

	mutex_unlock(&vm_ctx->lock);

	return pvr_obj;

err_unlock:
	mutex_unlock(&vm_ctx->lock);

	return NULL;
}

/**
 * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * FW object representing firmware memory context, or
 *  * %NULL if this VM context does not have a firmware memory context.
 */
struct pvr_fw_object *
pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->fw_mem_ctx_obj;
}
