// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_context.h"
#include "pvr_debugfs.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_free_list.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_job.h"
#include "pvr_mmu.h"
#include "pvr_power.h"
#include "pvr_rogue_defs.h"
#include "pvr_rogue_fwif_client.h"
#include "pvr_rogue_fwif_shared.h"
#include "pvr_vm.h"

#include <uapi/drm/pvr_drm.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/xarray.h>

/**
 * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
 *
 * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
 *
 * * AXE-1-16M (found in Texas Instruments AM62)
 */

/**
 * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
 * @drm_dev: [IN] Target DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_bo_args.
 * @file: [IN] DRM file-private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
 *    or wider than &typedef size_t,
 *  * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
 *    reserved or undefined are set,
 *  * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
 *    zero,
 *  * Any error encountered while creating the object (see
 *    pvr_gem_object_create()), or
 *  * Any error encountered while transferring ownership of the object into a
 *    userspace-accessible handle (see pvr_gem_object_into_handle()).
 */
static int
pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);

	struct pvr_gem_object *pvr_obj;
	size_t sanitized_size;

	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_c != 0) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * On 64-bit platforms (our primary target), size_t is a u64. However,
	 * on other architectures we have to check for overflow when casting
	 * down to size_t from u64.
	 *
	 * We also disallow zero-sized allocations, and reserved (kernel-only)
	 * flags.
	 */
	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	sanitized_size = (size_t)args->size;

	/*
	 * Create a buffer object and transfer ownership to a userspace-
	 * accessible handle.
	 */
	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
	if (IS_ERR(pvr_obj)) {
		err = PTR_ERR(pvr_obj);
		goto err_drm_dev_exit;
	}

	/* This function will not modify &args->handle unless it succeeds. */
	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
	if (err)
		goto err_destroy_obj;

	drm_dev_exit(idx);

	return 0;

err_destroy_obj:
	/*
	 * GEM objects are refcounted, so there is no explicit destructor
	 * function. Instead, we release the singular reference we currently
	 * hold on the object and let GEM take care of the rest.
	 */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
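
/*
 * Example (illustrative sketch, not part of the driver): minimal userspace
 * usage of %DRM_IOCTL_PVR_CREATE_BO via libdrm's drmIoctl(), assuming @fd is
 * a file descriptor for an open PowerVR render node and that 4096 is a
 * multiple of the device page size:
 *
 *	struct drm_pvr_ioctl_create_bo_args args = {
 *		.size = 4096,	// Must be device-page-aligned and non-zero.
 *		.flags = 0,	// Reserved bits must be clear.
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_PVR_CREATE_BO, &args) == 0)
 *		; // args.handle now refers to the new buffer object.
 */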

/**
 * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
 * used when calling mmap() from userspace to map the given GEM buffer object
 * @drm_dev: [IN] DRM device (unused).
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
 *
 * This IOCTL does *not* perform an mmap. See the docs on
 * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOENT if the handle does not reference a valid GEM buffer object,
 *  * -%EINVAL if any padding fields in &struct
 *    drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
 *  * Any error returned by drm_gem_create_mmap_offset().
 */
static int
pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_gem_object *pvr_obj;
	struct drm_gem_object *gem_obj;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_4 != 0) {
		ret = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * Obtain a kernel reference to the buffer object. This reference is
	 * counted and must be manually dropped before returning. If a buffer
	 * object cannot be found for the specified handle, return -%ENOENT (No
	 * such file or directory).
	 */
	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		ret = -ENOENT;
		goto err_drm_dev_exit;
	}

	gem_obj = gem_from_pvr_gem(pvr_obj);

	/*
	 * Allocate a fake offset which can be used in userspace calls to mmap
	 * on the DRM device file. If this fails, return the error code. This
	 * operation is idempotent.
	 */
	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret != 0) {
		/* Drop our reference to the buffer object. */
		drm_gem_object_put(gem_obj);
		goto err_drm_dev_exit;
	}

	/*
	 * Read out the fake offset allocated by the earlier call to
	 * drm_gem_create_mmap_offset.
	 */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	/* Drop our reference to the buffer object. */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return ret;
}
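
/*
 * Example (illustrative sketch, not part of the driver): fetching the fake
 * offset and then mapping the buffer, assuming @fd is an open render node,
 * @bo_handle came from a prior %DRM_IOCTL_PVR_CREATE_BO call and @bo_size is
 * the size that buffer was created with:
 *
 *	struct drm_pvr_ioctl_get_bo_mmap_offset_args args = {
 *		.handle = bo_handle,
 *	};
 *	void *map = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &args) == 0)
 *		map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, args.offset);
 */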

static __always_inline u64
pvr_fw_version_packed(u32 major, u32 minor)
{
	return ((u64)major << 32) | minor;
}
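
/* For example, pvr_fw_version_packed(1, 14) packs to 0x000000010000000e. */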

static u32
rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
{
	u32 max_partitions = 0;
	u32 tile_size_x = 0;
	u32 tile_size_y = 0;

	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
	PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);

	if (tile_size_x == 16 && tile_size_y == 16) {
		u32 usc_min_output_registers_per_pix = 0;

		PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
				  &usc_min_output_registers_per_pix);

		return tile_size_x * tile_size_y * max_partitions *
		       usc_min_output_registers_per_pix;
	}

	return max_partitions * 1024;
}

static u32
rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
{
	u32 common_store_size_in_dwords = 512 * 4 * 4;
	u32 alloc_region_size;

	PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);

	alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
			    rogue_get_common_store_partition_space_size(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
		u32 common_store_split_point = (768U * 4U * 4U);

		return min(common_store_split_point - (256U * 4U), alloc_region_size);
	}

	return alloc_region_size;
}

static inline u32
rogue_get_num_phantoms(struct pvr_device *pvr_dev)
{
	u32 num_clusters = 1;

	PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);

	return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
}

static inline u32
rogue_get_max_coeffs(struct pvr_device *pvr_dev)
{
	u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
	u32 pending_allocation_shared_regs = 2U * 1024U;
	u32 pending_allocation_coeff_regs = 0U;
	u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
	u32 tiles_in_flight = 0;
	u32 max_coeff_pixel_portion;

	PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
	max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
	max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;

	/*
	 * Compute tasks on cores with BRN48492 and without compute overlap may lock
	 * up without two additional lines of coeffs.
	 */
	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
		pending_allocation_coeff_regs = 2U * 1024U;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
		pending_allocation_shared_regs = 0;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
		max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;

	return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
		(max_coeff_pixel_portion + max_coeff_additional_portion +
		 pending_allocation_shared_regs);
}

static inline u32
rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
{
	u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
	    !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
		/* Driver must not use the 2 reserved lines. */
		available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
	}

	/*
	 * The maximum amount of local memory available to a kernel is the minimum
	 * of the total number of coefficient registers available and the max common
	 * store allocation size which can be made by the CDM.
	 *
	 * If any coeff lines are reserved for tessellation or pixel then we need to
	 * subtract those too.
	 */
	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
}

/**
 * pvr_dev_query_gpu_info_get()
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_gpu_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Returns:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
			   struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
		return 0;
	}

	gpu_info.gpu_id =
		pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(gpu_info))
		args->size = sizeof(gpu_info);
	return 0;
}

/**
 * pvr_dev_query_runtime_info_get()
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_runtime_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Returns:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_runtime_info runtime_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
		return 0;
	}

	runtime_info.free_list_min_pages =
		pvr_get_free_list_min_pages(pvr_dev);
	runtime_info.free_list_max_pages =
		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
	runtime_info.common_store_alloc_region_size =
		rogue_get_common_store_alloc_region_size(pvr_dev);
	runtime_info.common_store_partition_space_size =
		rogue_get_common_store_partition_space_size(pvr_dev);
	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
	runtime_info.cdm_max_local_mem_size_regs =
		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(runtime_info))
		args->size = sizeof(runtime_info);
	return 0;
}

/**
 * pvr_dev_query_quirks_get() - Unpack the array of quirks at the address
 * given in a struct drm_pvr_dev_query_quirks, or get the amount of space
 * required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_quirks.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to the number of quirks copied, or, if
 * either pointer is NULL, the number that would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Returns:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
			 struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location
	 */
	static const u32 umd_quirks_musthave[] = {
		47217,
		49927,
		62269,
	};
	static const u32 umd_quirks[] = {
		48545,
		51764,
	};
	struct drm_pvr_dev_query_quirks query;
	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
	size_t out_musthave_count = 0;
	size_t out_count = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_quirks);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
			out[out_count++] = umd_quirks_musthave[i];
			out_musthave_count++;
		}
	}

	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
			out[out_count++] = umd_quirks[i];
	}

	if (!query.quirks)
		goto copy_out;
	if (query.count < out_count)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
			 out_count * sizeof(u32))) {
		return -EFAULT;
	}

	query.musthave_count = out_musthave_count;

copy_out:
	query.count = out_count;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
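
/*
 * Example (illustrative sketch, not part of the driver): a userspace caller
 * would typically fetch the quirks list in two passes, assuming @fd is an
 * open render node:
 *
 *	struct drm_pvr_dev_query_quirks query = {0};
 *	struct drm_pvr_ioctl_dev_query_args args = {
 *		.type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
 *		.size = sizeof(query),
 *		.pointer = (__u64)(uintptr_t)&query,
 *	};
 *
 *	// Pass 1: query.quirks is NULL, so only query.count is filled in.
 *	drmIoctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
 *
 *	// Pass 2: provide a buffer large enough for query.count u32 IDs.
 *	query.quirks = (__u64)(uintptr_t)calloc(query.count, sizeof(__u32));
 *	drmIoctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
 */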

/**
 * pvr_dev_query_enhancements_get() - Unpack the array of enhancements at the
 * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
 * of space required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_enhancements.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to the number of enhancements copied, or,
 * if either pointer is NULL, the number that would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Returns:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location
	 */
	const u32 umd_enhancements[] = {
		35421,
		42064,
	};
	struct drm_pvr_dev_query_enhancements query;
	u32 out[ARRAY_SIZE(umd_enhancements)];
	size_t out_idx = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_enhancements);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_a)
		return -EINVAL;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
			out[out_idx++] = umd_enhancements[i];
	}

	if (!query.enhancements)
		goto copy_out;
	if (query.count < out_idx)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
			 out_idx * sizeof(u32))) {
		return -EFAULT;
	}

copy_out:
	query.count = out_idx;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_ioctl_dev_query() - IOCTL to copy information about a device
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_dev_query_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
 * If the given receiving struct pointer is NULL, or the indicated size is too
 * small, the expected size of the struct type will be returned in the size
 * argument field.
 *
 * Return:
 *  * 0 on success or when fetching the size with args->pointer == NULL, or
 *  * -%E2BIG if the indicated size of the receiving struct is less than is
 *    required to contain the copied data, or
 *  * -%EINVAL if the indicated struct type is unknown, or
 *  * -%ENOMEM if local memory could not be allocated, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_dev_query_args *args = raw_args;
	int idx;
	int ret = -EINVAL;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	switch ((enum drm_pvr_dev_query)args->type) {
	case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
		ret = pvr_dev_query_gpu_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
		ret = pvr_dev_query_runtime_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_QUIRKS_GET:
		ret = pvr_dev_query_quirks_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
		ret = pvr_dev_query_enhancements_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
		ret = pvr_heap_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
		ret = pvr_static_data_areas_get(pvr_dev, args);
		break;
	}

	drm_dev_exit(idx);

	return ret;
}
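
/*
 * Example (illustrative sketch, not part of the driver): every query type
 * supports the same two-step pattern, shown here for
 * %DRM_PVR_DEV_QUERY_GPU_INFO_GET with an assumed open render node @fd:
 *
 *	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
 *	struct drm_pvr_ioctl_dev_query_args args = {
 *		.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
 *		.pointer = 0,	// Step 1: ask for the expected size only.
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
 *
 *	// Step 2: args.size was filled in above; now fetch the data.
 *	args.pointer = (__u64)(uintptr_t)&gpu_info;
 *	drmIoctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
 */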

/**
 * pvr_ioctl_create_context() - IOCTL to create a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if provided arguments are invalid, or
 *  * -%EFAULT if arguments can't be copied from userspace, or
 *  * Any error returned by pvr_context_create().
 */
static int
pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
			 struct drm_file *file)
{
	struct drm_pvr_ioctl_create_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	ret = pvr_context_create(pvr_file, args);

	drm_dev_exit(idx);

	return ret;
}
/**
 * pvr_ioctl_destroy_context() - IOCTL to destroy a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_destroy_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
static int
pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
			  struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;

	if (args->_padding_4)
		return -EINVAL;

	return pvr_context_destroy(pvr_file, args->handle);
}

/**
 * pvr_ioctl_create_free_list() - IOCTL to create a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_free_list_create().
 */
static int
pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
			   struct drm_file *file)
{
	struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	free_list = pvr_free_list_create(pvr_file, args);
	if (IS_ERR(free_list)) {
		err = PTR_ERR(free_list);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->free_list_handles,
		       &args->handle,
		       free_list,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_free_list_put(free_list);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if free list not in object list.
 */
static int
pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;

	if (args->_padding_4)
		return -EINVAL;

	free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
	if (!free_list)
		return -EINVAL;

	pvr_free_list_put(free_list);
	return 0;
}

/**
 * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_hwrt_dataset_create().
 */
static int
pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			      struct drm_file *file)
{
	struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	hwrt = pvr_hwrt_dataset_create(pvr_file, args);
	if (IS_ERR(hwrt)) {
		err = PTR_ERR(hwrt);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->hwrt_handles,
		       &args->handle,
		       hwrt,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_hwrt_dataset_put(hwrt);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if HWRT dataset not in object list.
 */
static int
pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			       struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;

	if (args->_padding_4)
		return -EINVAL;

	hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
	if (!hwrt)
		return -EINVAL;

	pvr_hwrt_dataset_put(hwrt);
	return 0;
}

/**
 * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_vm_create_context().
 */
static int
pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (args->_padding_4) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
	if (IS_ERR(vm_ctx)) {
		err = PTR_ERR(vm_ctx);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->vm_ctx_handles,
		       &args->handle,
		       vm_ctx,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if object not in object list.
 */
static int
pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
	if (!vm_ctx)
		return -EINVAL;

	pvr_vm_context_put(vm_ctx);
	return 0;
}

/**
 * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_vm_map_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_map_args.flags is not zero,
 *  * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_map_args.offset
 *    and &drm_pvr_ioctl_vm_map_args.size are not valid or do not fall
 *    within the buffer object specified by
 *    &drm_pvr_ioctl_vm_map_args.handle,
 *  * -%EINVAL if the bounds specified by
 *    &drm_pvr_ioctl_vm_map_args.device_addr and
 *    &drm_pvr_ioctl_vm_map_args.size do not form a valid device-virtual
 *    address range which falls entirely within a single heap, or
 *  * -%ENOENT if &drm_pvr_ioctl_vm_map_args.handle does not refer to a
 *    valid PowerVR buffer object.
 */
static int
pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
		 struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	struct pvr_gem_object *pvr_obj;
	size_t pvr_obj_size;

	u64 offset_plus_size;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* Initial validation of args. */
	if (args->_padding_14) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	if (args->flags != 0 ||
	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		err = -ENOENT;
		goto err_put_vm_context;
	}

	pvr_obj_size = pvr_gem_object_size(pvr_obj);

	/*
	 * Validate offset and size args. The alignment of these will be
	 * checked when mapping; for now just check that they're within valid
	 * bounds
	 */
	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
		err = -EINVAL;
		goto err_put_pvr_object;
	}

	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
			 args->device_addr, args->size);
	if (err)
		goto err_put_pvr_object;

	/*
	 * In order to set up the mapping, we needed a reference to &pvr_obj.
	 * However, pvr_vm_map() obtains and stores its own reference, so we
	 * must release ours before returning.
	 */

err_put_pvr_object:
	pvr_gem_object_put(pvr_obj);

err_put_vm_context:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_vm_unmap_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_unmap_args.device_addr is not a valid
 *    device page-aligned device-virtual address, or
 *  * -%ENOENT if there is currently no PowerVR buffer object mapped at
 *    &drm_pvr_ioctl_vm_unmap_args.device_addr.
 */
static int
pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
		   struct drm_file *file)
{
	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int err;

	/* Initial validation of args. */
	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx)
		return -EINVAL;

	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);

	pvr_vm_context_put(vm_ctx);

	return err;
}
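
/*
 * Example (illustrative sketch, not part of the driver): mapping and later
 * unmapping a buffer object, assuming @fd is an open render node,
 * @vm_ctx_handle and @bo_handle came from the CREATE_VM_CONTEXT and
 * CREATE_BO IOCTLs, @bo_size is the buffer's size, and @dev_addr lies within
 * a heap reported by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
 *
 *	struct drm_pvr_ioctl_vm_map_args map_args = {
 *		.vm_context_handle = vm_ctx_handle,
 *		.device_addr = dev_addr,
 *		.handle = bo_handle,
 *		.offset = 0,
 *		.size = bo_size,
 *	};
 *	struct drm_pvr_ioctl_vm_unmap_args unmap_args = {
 *		.vm_context_handle = vm_ctx_handle,
 *		.device_addr = dev_addr,
 *		.size = bo_size,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_PVR_VM_MAP, &map_args);
 *	// ... use the mapping ...
 *	drmIoctl(fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap_args);
 */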

/**
 * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_submit_jobs_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if arguments are invalid.
 */
static int
pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
		      struct drm_file *file)
{
	struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = pvr_submit_jobs(pvr_dev, pvr_file, args);

	drm_dev_exit(idx);

	return err;
}

int
pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
}

int
pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
		return -EFAULT;

	if (usr_stride > obj_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
		return -EFAULT;
	}

	return 0;
}
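
/*
 * Note on the stride convention above (editor's illustration): if userspace
 * passes a stride of, say, 24 bytes for an object this kernel knows as 16
 * bytes (@obj_size), pvr_set_uobj() copies the 16 known bytes and zero-fills
 * the trailing 8, so newer userspace sees zeroed defaults for fields this
 * kernel predates. In the other direction, pvr_get_uobj() relies on
 * copy_struct_from_user(), which zero-extends short user structs and rejects
 * longer ones whose trailing bytes are non-zero (-E2BIG).
 */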

int
pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
{
	int ret = 0;
	void *out_alloc;

	if (in->stride < min_stride)
		return -EINVAL;

	if (!in->count)
		return 0;

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return -ENOMEM;

	if (obj_size == in->stride) {
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ret;
	}

	*out = out_alloc;
	return 0;
}

int
pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
		   const void *in)
{
	if (out->stride < min_stride)
		return -EINVAL;

	if (!out->count)
		return 0;

	if (obj_size == out->stride) {
		if (copy_to_user(u64_to_user_ptr(out->array), in,
				 (unsigned long)obj_size * out->count))
			return -EFAULT;
	} else {
		u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
		void __user *out_ptr = u64_to_user_ptr(out->array);
		const void *in_ptr = in;

		for (u32 i = 0; i < out->count; i++) {
			if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
				return -EFAULT;

			/*
			 * The userspace array advances by its own stride; the
			 * kernel source array is tightly packed at obj_size.
			 */
			out_ptr += out->stride;
			in_ptr += obj_size;
		}

		if (out->stride > obj_size &&
		    clear_user(u64_to_user_ptr(out->array + obj_size),
			       out->stride - obj_size)) {
			return -EFAULT;
		}
	}

	return 0;
}

#define DRM_PVR_IOCTL(_name, _func, _flags) \
	DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)
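
/*
 * For example, DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW) expands
 * to DRM_IOCTL_DEF_DRV(PVR_CREATE_BO, pvr_ioctl_create_bo, DRM_RENDER_ALLOW),
 * wiring %DRM_IOCTL_PVR_CREATE_BO to pvr_ioctl_create_bo() above.
 */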

/* clang-format off */

static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
	DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
};

/* clang-format on */

#undef DRM_PVR_IOCTL

/**
 * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
 * @drm_dev: [IN] DRM device.
 * @file: [IN] DRM file private data.
 *
 * Allocates powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if the allocation of a &struct pvr_file fails, or
 *  * Any error returned by pvr_memory_context_init().
 */
static int
pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file;

	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
	if (!pvr_file)
		return -ENOMEM;

	/*
	 * Store reference to base DRM file private data for use by
	 * from_pvr_file.
	 */
	pvr_file->file = file;

	/*
	 * Store reference to powervr-specific outer device struct in file
	 * private data for convenient access.
	 */
	pvr_file->pvr_dev = pvr_dev;

	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);

	/*
	 * Store reference to powervr-specific file private data in DRM file
	 * private data.
	 */
	file->driver_priv = pvr_file;

	return 0;
}

/**
 * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
 * drm_file is closed.
 * @drm_dev: [IN] DRM device (unused).
 * @file: [IN] DRM file private data.
 *
 * Frees powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 */
static void
pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
			 struct drm_file *file)
{
	struct pvr_file *pvr_file = to_pvr_file(file);

	/* Kill remaining contexts. */
	pvr_destroy_contexts_for_file(pvr_file);

	/* Drop references on any remaining objects. */
	pvr_destroy_free_lists_for_file(pvr_file);
	pvr_destroy_hwrt_datasets_for_file(pvr_file);
	pvr_destroy_vm_contexts_for_file(pvr_file);

	kfree(pvr_file);
	file->driver_priv = NULL;
}

DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);

static struct drm_driver pvr_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER |
			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.open = pvr_drm_driver_open,
	.postclose = pvr_drm_driver_postclose,
	.ioctls = pvr_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
	.fops = &pvr_drm_driver_fops,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = pvr_debugfs_init,
#endif

	.name = PVR_DRIVER_NAME,
	.desc = PVR_DRIVER_DESC,
	.date = PVR_DRIVER_DATE,
	.major = PVR_DRIVER_MAJOR,
	.minor = PVR_DRIVER_MINOR,
	.patchlevel = PVR_DRIVER_PATCHLEVEL,

	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_create_object = pvr_gem_create_object,
};

static int
pvr_probe(struct platform_device *plat_dev)
{
	struct pvr_device *pvr_dev;
	struct drm_device *drm_dev;
	int err;

	pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
				     struct pvr_device, base);
	if (IS_ERR(pvr_dev))
		return PTR_ERR(pvr_dev);

	drm_dev = &pvr_dev->base;

	platform_set_drvdata(plat_dev, drm_dev);

	init_rwsem(&pvr_dev->reset_sem);

	pvr_context_device_init(pvr_dev);

	err = pvr_queue_device_init(pvr_dev);
	if (err)
		goto err_context_fini;

	devm_pm_runtime_enable(&plat_dev->dev);
	pm_runtime_mark_last_busy(&plat_dev->dev);

	pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
	pm_runtime_use_autosuspend(&plat_dev->dev);
	pvr_watchdog_init(pvr_dev);

	err = pvr_device_init(pvr_dev);
	if (err)
		goto err_watchdog_fini;

	err = drm_dev_register(drm_dev, 0);
	if (err)
		goto err_device_fini;

	xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);

	return 0;

err_device_fini:
	pvr_device_fini(pvr_dev);

err_watchdog_fini:
	pvr_watchdog_fini(pvr_dev);

	pvr_queue_device_fini(pvr_dev);

err_context_fini:
	pvr_context_device_fini(pvr_dev);

	return err;
}

static int
pvr_remove(struct platform_device *plat_dev)
{
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	WARN_ON(!xa_empty(&pvr_dev->job_ids));
	WARN_ON(!xa_empty(&pvr_dev->free_list_ids));

	xa_destroy(&pvr_dev->job_ids);
	xa_destroy(&pvr_dev->free_list_ids);

	pm_runtime_suspend(drm_dev->dev);
	pvr_device_fini(pvr_dev);
	drm_dev_unplug(drm_dev);
	pvr_watchdog_fini(pvr_dev);
	pvr_queue_device_fini(pvr_dev);
	pvr_context_device_fini(pvr_dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "img,img-axe", .data = NULL },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops pvr_pm_ops = {
	RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)
};

static struct platform_driver pvr_driver = {
	.probe = pvr_probe,
	.remove = pvr_remove,
	.driver = {
		.name = PVR_DRIVER_NAME,
		.pm = &pvr_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(pvr_driver);

MODULE_AUTHOR("Imagination Technologies Ltd.");
MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");