1/* SPDX-License-Identifier: GPL-2.0 or MIT */
2/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
3/* Copyright 2023 Collabora ltd. */
4
5#ifndef __PANTHOR_GEM_H__
6#define __PANTHOR_GEM_H__
7
8#include <drm/drm_gem_shmem_helper.h>
9#include <drm/drm_mm.h>
10
11#include <linux/iosys-map.h>
12#include <linux/rwsem.h>
13
14struct panthor_vm;
15
/**
 * struct panthor_gem_object - Driver specific GEM object.
 *
 * Wraps a &struct drm_gem_shmem_object and adds the Panthor-specific
 * state: exclusive-VM tracking, a GPUVA list lock and the BO flags.
 */
struct panthor_gem_object {
	/** @base: Inherit from drm_gem_shmem_object. */
	struct drm_gem_shmem_object base;

	/**
	 * @exclusive_vm_root_gem: Root GEM of the exclusive VM this GEM object
	 * is attached to.
	 *
	 * If @exclusive_vm_root_gem != NULL, any attempt to bind the GEM to a
	 * different VM will fail.
	 *
	 * All FW memory objects have this field set to the root GEM of the MCU
	 * VM.
	 */
	struct drm_gem_object *exclusive_vm_root_gem;

	/**
	 * @gpuva_list_lock: Custom GPUVA lock.
	 *
	 * Used to protect insertion of drm_gpuva elements to the
	 * drm_gem_object.gpuva.list list.
	 *
	 * We can't use the GEM resv for that, because drm_gpuva_link() is
	 * called in a dma-signaling path, where we're not allowed to take
	 * resv locks.
	 */
	struct mutex gpuva_list_lock;

	/** @flags: Combination of drm_panthor_bo_flags flags. */
	u32 flags;
};
50
/**
 * struct panthor_kernel_bo - Kernel buffer object.
 *
 * These objects are only manipulated by the kernel driver and not
 * directly exposed to the userspace. The GPU address of a kernel
 * BO might be passed to userspace though.
 */
struct panthor_kernel_bo {
	/**
	 * @obj: The GEM object backing this kernel buffer object.
	 */
	struct drm_gem_object *obj;

	/**
	 * @vm: VM this private buffer is attached to.
	 */
	struct panthor_vm *vm;

	/**
	 * @va_node: VA space allocated to this GEM.
	 *
	 * The GPU virtual address of the BO is @va_node.start (see
	 * panthor_kernel_bo_gpuva()).
	 */
	struct drm_mm_node va_node;

	/**
	 * @kmap: Kernel CPU mapping of @obj.
	 *
	 * NULL when the buffer is not currently mapped (see
	 * panthor_kernel_bo_vmap()/panthor_kernel_bo_vunmap()).
	 */
	void *kmap;
};
79
80static inline
81struct panthor_gem_object *to_panthor_bo(struct drm_gem_object *obj)
82{
83	return container_of(to_drm_gem_shmem_obj(obj), struct panthor_gem_object, base);
84}
85
/* GEM object allocation hook; presumably wired to drm_driver.gem_create_object
 * — confirm against the definition.
 */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size);

/* PRIME import hook building a GEM object from an imported sg_table;
 * presumably wired to drm_driver.gem_prime_import_sg_table — confirm against
 * the definition.
 */
struct drm_gem_object *
panthor_gem_prime_import_sg_table(struct drm_device *ddev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt);
92
93int
94panthor_gem_create_with_handle(struct drm_file *file,
95			       struct drm_device *ddev,
96			       struct panthor_vm *exclusive_vm,
97			       u64 *size, u32 flags, uint32_t *handle);
98
99static inline u64
100panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
101{
102	return bo->va_node.start;
103}
104
105static inline size_t
106panthor_kernel_bo_size(struct panthor_kernel_bo *bo)
107{
108	return bo->obj->size;
109}
110
111static inline int
112panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
113{
114	struct iosys_map map;
115	int ret;
116
117	if (bo->kmap)
118		return 0;
119
120	ret = drm_gem_vmap_unlocked(bo->obj, &map);
121	if (ret)
122		return ret;
123
124	bo->kmap = map.vaddr;
125	return 0;
126}
127
128static inline void
129panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
130{
131	if (bo->kmap) {
132		struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
133
134		drm_gem_vunmap_unlocked(bo->obj, &map);
135		bo->kmap = NULL;
136	}
137}
138
139struct panthor_kernel_bo *
140panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
141			 size_t size, u32 bo_flags, u32 vm_map_flags,
142			 u64 gpu_va);
143
144void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
145
146#endif /* __PANTHOR_GEM_H__ */
147