// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/dma-buf.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"

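/*
 * Pin callback for PRIME export: pin the BO into the GTT domain so its
 * pages stay resident while the dma-buf is shared, and bump sharing_count
 * under the BO reservation lock.
 */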
static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
{
	struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
	int ret;

	ret = lsdc_bo_reserve(lbo);
	if (unlikely(ret))
		return ret;

	ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		lbo->sharing_count++;

	lsdc_bo_unreserve(lbo);

	return ret;
}

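/*
 * Unpin callback for PRIME export: drop the pin taken by lsdc_gem_prime_pin()
 * and decrement sharing_count, again under the BO reservation lock.
 */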
static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
	int ret;

	ret = lsdc_bo_reserve(lbo);
	if (unlikely(ret))
		return;

	lsdc_bo_unpin(lbo);
	if (lbo->sharing_count)
		lbo->sharing_count--;

	lsdc_bo_unreserve(lbo);
}

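/*
 * Build a scatter/gather table from the pages backing this BO so it can be
 * handed to a dma-buf importer. The BO must have a populated ttm_tt.
 */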
static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	struct ttm_tt *tt = tbo->ttm;

	if (!tt) {
		drm_err(obj->dev, "sharing a buffer without backing memory\n");
		return ERR_PTR(-ENOMEM);
	}

	return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
}

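/*
 * The GEM object is embedded in a TTM BO; dropping the TTM reference here
 * releases the buffer once TTM has finished with it.
 */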
static void lsdc_gem_object_free(struct drm_gem_object *obj)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);

	if (tbo)
		ttm_bo_put(tbo);
}

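/*
 * Map the BO into kernel address space. The mapping is reference counted
 * via vmap_count: the first caller pins the BO in place and creates the
 * mapping, later callers simply reuse it.
 */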
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);
	int ret;

	if (lbo->vmap_count > 0) {
		++lbo->vmap_count;
		goto out;
	}

	ret = lsdc_bo_pin(lbo, 0, NULL);
	if (unlikely(ret)) {
		drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
		return ret;
	}

	ret = ttm_bo_vmap(tbo, &lbo->map);
	if (ret) {
		drm_err(obj->dev, "ttm bo vmap failed\n");
		lsdc_bo_unpin(lbo);
		return ret;
	}

	lbo->vmap_count = 1;

out:
	*map = lbo->map;

	return 0;
}

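/*
 * Drop one kernel mapping reference; the last vunmap tears the mapping
 * down and unpins the BO.
 */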
static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);

	if (unlikely(!lbo->vmap_count)) {
		drm_warn(obj->dev, "%p is not mapped\n", lbo);
		return;
	}

	--lbo->vmap_count;
	if (lbo->vmap_count == 0) {
		ttm_bo_vunmap(tbo, &lbo->map);

		lsdc_bo_unpin(lbo);
	}
}

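/* Map the BO into userspace through the TTM mmap helper */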
static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	int ret;

	ret = ttm_bo_mmap_obj(vma, tbo);
	if (unlikely(ret)) {
		drm_warn(obj->dev, "mmap %p failed\n", tbo);
		return ret;
	}

	/*
	 * TTM takes its own reference on the BO for the VMA, so drop the
	 * GEM reference acquired by the mmap path to avoid double counting.
	 */
	drm_gem_object_put(obj);

	return 0;
}

static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
	.free = lsdc_gem_object_free,
	.export = drm_gem_prime_export,
	.pin = lsdc_gem_prime_pin,
	.unpin = lsdc_gem_prime_unpin,
	.get_sg_table = lsdc_gem_prime_get_sg_table,
	.vmap = lsdc_gem_object_vmap,
	.vunmap = lsdc_gem_object_vunmap,
	.mmap = lsdc_gem_object_mmap,
};

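/*
 * Create a GEM object backed by a lsdc_bo in the given memory domain.
 * Buffers that are not imported (no sg table) are cleared, and the new BO
 * is added to the per-device list used by debugfs.
 */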
struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
					      u32 domain,
					      size_t size,
					      bool kernel,
					      struct sg_table *sg,
					      struct dma_resv *resv)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct drm_gem_object *gobj;
	struct lsdc_bo *lbo;
	int ret;

	lbo = lsdc_bo_create(ddev, domain, size, kernel, sg, resv);
	if (IS_ERR(lbo)) {
		ret = PTR_ERR(lbo);
		return ERR_PTR(ret);
	}

	if (!sg) {
		/* VRAM is filled with random data, clear freshly created BOs */
		lsdc_bo_clear(lbo);
	}

	gobj = &lbo->tbo.base;
	gobj->funcs = &lsdc_gem_object_funcs;

	/* Keep track of the BOs we created */
	mutex_lock(&ldev->gem.mutex);
	list_add_tail(&lbo->list, &ldev->gem.objects);
	mutex_unlock(&ldev->gem.mutex);

	return gobj;
}

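/*
 * Import a dma-buf: wrap the importer-provided sg table in a GTT-domain
 * GEM object that shares the exporter's reservation object.
 */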
struct drm_gem_object *
lsdc_prime_import_sg_table(struct drm_device *ddev,
			   struct dma_buf_attachment *attach,
			   struct sg_table *sg)
{
	struct dma_resv *resv = attach->dmabuf->resv;
	u64 size = attach->dmabuf->size;
	struct drm_gem_object *gobj;
	struct lsdc_bo *lbo;

	dma_resv_lock(resv, NULL);
	gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
				      sg, resv);
	dma_resv_unlock(resv);

	if (IS_ERR(gobj)) {
		drm_err(ddev, "Failed to import sg table\n");
		return gobj;
	}

	lbo = gem_to_lsdc_bo(gobj);
	lbo->sharing_count = 1;

	return gobj;
}

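/*
 * Create a dumb buffer in VRAM. The pitch is aligned to the hardware
 * requirement and the total size is page aligned.
 */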
int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
		     struct drm_mode_create_dumb *args)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	const struct lsdc_desc *descp = ldev->descp;
	u32 domain = LSDC_GEM_DOMAIN_VRAM;
	struct drm_gem_object *gobj;
	size_t size;
	u32 pitch;
	u32 handle;
	int ret;

	if (!args->width || !args->height)
		return -EINVAL;

	if (args->bpp != 32 && args->bpp != 16)
		return -EINVAL;

	pitch = args->width * args->bpp / 8;
	pitch = ALIGN(pitch, descp->pitch_align);
	size = pitch * args->height;
	size = ALIGN(size, PAGE_SIZE);

	/* The maximum single BO size allowed is half of the available VRAM */
	if (size > ldev->vram_size / 2) {
		drm_err(ddev, "Requested size %zuMiB exceeds half of VRAM\n", size >> 20);
		return -ENOMEM;
	}

	gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
	if (IS_ERR(gobj)) {
		drm_err(ddev, "Failed to create gem object\n");
		return PTR_ERR(gobj);
	}

	ret = drm_gem_handle_create(file, gobj, &handle);

	/* Drop the reference from allocation; the handle holds it now */
	drm_gem_object_put(gobj);
	if (ret)
		return ret;

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;
}

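/* Look up the fake mmap offset of a dumb buffer for use with mmap(2) */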
int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
			 u32 handle, uint64_t *offset)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	*offset = drm_vma_node_offset_addr(&gobj->vma_node);

	drm_gem_object_put(gobj);

	return 0;
}

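/* Initialize the per-device bookkeeping used to track created GEM objects */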
void lsdc_gem_init(struct drm_device *ddev)
{
	struct lsdc_device *ldev = to_lsdc(ddev);

	mutex_init(&ldev->gem.mutex);
	INIT_LIST_HEAD(&ldev->gem.objects);
}

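/*
 * debugfs: dump the BOs created by this device along with their size,
 * current memory type and GPU offset, plus the total pinned sizes.
 */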
int lsdc_show_buffer_object(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *ddev = node->minor->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct lsdc_bo *lbo;
	unsigned int i;

	mutex_lock(&ldev->gem.mutex);

	i = 0;

	list_for_each_entry(lbo, &ldev->gem.objects, list) {
		struct ttm_buffer_object *tbo = &lbo->tbo;
		struct ttm_resource *resource = tbo->resource;

		seq_printf(m, "bo[%04u][%p]: size: %8zuKiB %s offset: %8llx\n",
			   i, lbo, lsdc_bo_size(lbo) >> 10,
			   lsdc_mem_type_to_str(resource->mem_type),
			   lsdc_bo_gpu_offset(lbo));
		i++;
	}

	mutex_unlock(&ldev->gem.mutex);

	seq_printf(m, "Pinned BO size: VRAM: %zuKiB, GTT: %zuKiB\n",
		   ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10);

	return 0;
}