// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/list.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "drm.h"
#include "uapi.h"

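/*
 * Release callback for a mapping's reference count: unpin the DMA mapping
 * and drop the GEM object reference taken in tegra_drm_ioctl_channel_map().
 */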
static void tegra_drm_mapping_release(struct kref *ref)
{
	struct tegra_drm_mapping *mapping =
		container_of(ref, struct tegra_drm_mapping, ref);

	host1x_bo_unpin(mapping->map);
	host1x_bo_put(mapping->bo);

	kfree(mapping);
}

void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
{
	kref_put(&mapping->ref, tegra_drm_mapping_release);
}

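/*
 * Tear down a channel context: drop its memory context (if any), release
 * every mapping still in its XArray and return the channel.
 */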
static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
{
	struct tegra_drm_mapping *mapping;
	unsigned long id;

	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);

	xa_for_each(&context->mappings, id, mapping)
		tegra_drm_mapping_put(mapping);

	xa_destroy(&context->mappings);

	host1x_channel_put(context->channel);

	kfree(context);
}

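/*
 * Called when a file is closed: releases every context and every
 * client-managed syncpoint that the file still holds.
 */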
void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
{
	struct tegra_drm_context *context;
	struct host1x_syncpt *sp;
	unsigned long id;

	xa_for_each(&file->contexts, id, context)
		tegra_drm_channel_context_close(context);

	xa_for_each(&file->syncpoints, id, sp)
		host1x_syncpt_put(sp);

	xa_destroy(&file->contexts);
	xa_destroy(&file->syncpoints);
}

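/* Look up the client registered for the given host1x class, if any. */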
static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
{
	struct tegra_drm_client *client;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == class)
			return client;

	return NULL;
}

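/*
 * DRM_TEGRA_CHANNEL_OPEN: open a channel to the engine identified by
 * args->host1x_class and, when the engine sits behind an IOMMU and supports
 * context isolation, allocate a per-process memory context for it.
 *
 * A rough userspace sketch (0x5d, the VIC class, is just an example):
 *
 *	struct drm_tegra_channel_open args = { .host1x_class = 0x5d };
 *	int err = ioctl(fd, DRM_IOCTL_TEGRA_CHANNEL_OPEN, &args);
 *
 * On success, args.context identifies this channel context in later
 * CHANNEL_MAP/CHANNEL_CLOSE calls.
 */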
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_channel_open *args = data;
	struct tegra_drm_client *client = NULL;
	struct tegra_drm_context *context;
	int err;

	if (args->flags)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	client = tegra_drm_find_client(tegra, args->host1x_class);
	if (!client) {
		err = -ENODEV;
		goto free;
	}

	if (client->shared_channel) {
		context->channel = host1x_channel_get(client->shared_channel);
	} else {
		context->channel = host1x_channel_request(&client->base);
		if (!context->channel) {
			err = -EBUSY;
			goto free;
		}
	}

	/* Only allocate context if the engine supports context isolation. */
	if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
		bool supported;

		err = client->ops->can_use_memory_ctx(client, &supported);
		if (err)
			goto put_channel;

		if (supported) {
			struct pid *pid = get_task_pid(current, PIDTYPE_TGID);

			context->memory_context = host1x_memory_context_alloc(
				host, client->base.dev, pid);

			/*
			 * host1x_memory_context_alloc() takes its own
			 * reference to the PID, so drop the one that
			 * get_task_pid() returned to avoid leaking it.
			 */
			put_pid(pid);
		}

		if (IS_ERR(context->memory_context)) {
			if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
				err = PTR_ERR(context->memory_context);
				goto put_channel;
			} else {
				/*
				 * OK, HW does not support contexts or contexts
				 * are disabled.
				 */
				context->memory_context = NULL;
			}
		}
	}

	err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto put_memctx;

	context->client = client;
	xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);

	args->version = client->version;
	args->capabilities = 0;

	if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
		args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;

	return 0;

put_memctx:
	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);
put_channel:
	host1x_channel_put(context->channel);
free:
	kfree(context);

	return err;
}

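/*
 * DRM_TEGRA_CHANNEL_CLOSE: remove the context from the file's XArray while
 * holding the lock, then tear it down outside of it.
 */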
int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_close *args = data;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	xa_erase(&fpriv->contexts, args->context);

	mutex_unlock(&fpriv->lock);

	tegra_drm_channel_context_close(context);

	return 0;
}

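/*
 * DRM_TEGRA_CHANNEL_MAP: pin a GEM object for access by the context's engine
 * and return a mapping ID that job submissions can refer to.
 */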
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_map *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;
	enum dma_data_direction direction;
	struct device *mapping_dev;
	int err = 0;

	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
		return -EINVAL;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		err = -ENOMEM;
		goto unlock;
	}

	kref_init(&mapping->ref);

	if (context->memory_context)
		mapping_dev = &context->memory_context->dev;
	else
		mapping_dev = context->client->base.dev;

	mapping->bo = tegra_gem_lookup(file, args->handle);
	if (!mapping->bo) {
		err = -EINVAL;
		goto free;
	}

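	/*
	 * The map flags describe the engine's access to the buffer, so
	 * "the engine reads" translates to DMA towards the device and
	 * "the engine writes" to DMA from the device.
	 */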
	switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
	case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
		direction = DMA_BIDIRECTIONAL;
		break;

	case DRM_TEGRA_CHANNEL_MAP_WRITE:
		direction = DMA_FROM_DEVICE;
		break;

	case DRM_TEGRA_CHANNEL_MAP_READ:
		direction = DMA_TO_DEVICE;
		break;

	default:
		err = -EINVAL;
		goto put_gem;
	}

	mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
	if (IS_ERR(mapping->map)) {
		err = PTR_ERR(mapping->map);
		goto put_gem;
	}

	mapping->iova = mapping->map->phys;
	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;

	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto unpin;

	mutex_unlock(&fpriv->lock);

	return 0;

unpin:
	host1x_bo_unpin(mapping->map);
put_gem:
	host1x_bo_put(mapping->bo);
free:
	kfree(mapping);
unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

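/*
 * DRM_TEGRA_CHANNEL_UNMAP: drop the file's reference to a mapping. The
 * mapping itself is freed only once the last reference, e.g. from a job
 * still in flight, is gone.
 */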
int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_unmap *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = xa_erase(&context->mappings, args->mapping);

	mutex_unlock(&fpriv->lock);

	if (!mapping)
		return -EINVAL;

	tegra_drm_mapping_put(mapping);
	return 0;
}

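/*
 * DRM_TEGRA_SYNCPOINT_ALLOCATE: reserve a client-managed host1x syncpoint
 * for this file and return its ID.
 */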
int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;
	int err;

	if (args->id)
		return -EINVAL;

	sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
	if (!sp)
		return -EBUSY;

	args->id = host1x_syncpt_id(sp);

	err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
	if (err) {
		host1x_syncpt_put(sp);
		return err;
	}

	return 0;
}

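/*
 * DRM_TEGRA_SYNCPOINT_FREE: release a syncpoint previously allocated through
 * this file. The allocate args structure is reused here since only the ID
 * field is meaningful.
 */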
int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;

	mutex_lock(&fpriv->lock);
	sp = xa_erase(&fpriv->syncpoints, args->id);
	mutex_unlock(&fpriv->lock);

	if (!sp)
		return -EINVAL;

	host1x_syncpt_put(sp);

	return 0;
}

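/*
 * DRM_TEGRA_SYNCPOINT_WAIT: block until the syncpoint reaches the given
 * threshold or the absolute timeout expires, returning the observed
 * syncpoint value in args->value.
 */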
int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct drm_tegra_syncpoint_wait *args = data;
	signed long timeout_jiffies;
	struct host1x_syncpt *sp;

	if (args->padding != 0)
		return -EINVAL;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);

	return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
}