/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include "radeon.h"
#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_asic.h"
#include "radeon_kms.h"

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 * Returns 0 on success.
 */
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
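	/* If the MMIO aperture was never mapped, device init did not get far
	 * enough to need a full teardown; just free the device structure. */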
	if (rdev->rmmio == NULL)
		goto done_free;
	radeon_acpi_fini(rdev);
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	free(rdev, DRM_MEM_DRIVER);
	dev->dev_private = NULL;
	return 0;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = malloc(sizeof(struct radeon_device), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		DRM_INFO("RADEON_IS_AGP\n");
		flags |= RADEON_IS_AGP;
	} else if (drm_pci_device_is_pcie(dev)) {
		DRM_INFO("RADEON_IS_PCIE\n");
		flags |= RADEON_IS_PCIE;
	} else {
		DRM_INFO("RADEON_IS_PCI\n");
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init() should report only fatal errors, such as a
	 * memory allocation, I/O mapping, or memory manager initialization
	 * failure.  It must properly initialize the GPU MC controller and
	 * permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again, radeon_modeset_init() should fail only on a fatal error;
	 * otherwise it should provide enough functionality for shadowfb
	 * to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(dev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: requires modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(dev->dev,
				"Error during ACPI methods call\n");
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: drm file that currently owns the rights
 * @applier: drm file requesting or revoking the rights
 * @value: 1 to request the rights, 0 to revoke them; on return, 1 if
 * the applier owns the rights, 0 otherwise
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	DRM_LOCK(dev);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
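	/* Report back whether the applier now owns the rights. */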
	*value = *owner == applier ? 1 : 0;
	DRM_UNLOCK(dev);
}

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t value, *value_ptr;
	uint64_t value64, *value_ptr64;
	struct drm_crtc *crtc;
	int i, found;

	/* TIMESTAMP is a 64-bit value, needs special handling. */
	if (info->request == RADEON_INFO_TIMESTAMP) {
		if (rdev->family >= CHIP_R600) {
			value_ptr64 = (uint64_t*)((unsigned long)info->value);
			if (rdev->family >= CHIP_TAHITI) {
				value64 = si_get_gpu_clock(rdev);
			} else {
				value64 = r600_get_gpu_clock(rdev);
			}

			if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
				DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
				return -EFAULT;
			}
			return 0;
		} else {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
	}

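	/* Every other request exchanges a single 32-bit value with userspace;
	 * read the caller-supplied value first, since some requests use it
	 * as an input parameter. */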
	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
		DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
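		/* rdev->clock.spll.reference_freq is assumed to be stored in
		 * 10 kHz units, so multiplying by 10 yields kHz. */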
		value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
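		/* This kernel always reports the Fusion GART as working. */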
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.num_ses;
		else
			value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
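	/* Write the 32-bit result back to the user-supplied pointer. */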
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}

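/*
 * Illustrative only (not part of this driver): from userspace the query
 * above is typically reached through libdrm, e.g.
 *
 *	struct drm_radeon_info info = { 0 };
 *	uint32_t id = 0;
 *	info.request = RADEON_INFO_DEVICE_ID;
 *	info.value = (uintptr_t)&id;
 *	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
 *
 * Field names follow the drm_radeon_info definition in radeon_drm.h.
 */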

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_firstopen_kms - drm callback for first open
 *
 * @dev: drm dev pointer
 *
 * Nothing to be done for KMS (all asics).
 * Returns 0 on success.
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
#ifdef FREEBSD_WIP
	vga_switcheroo_process_delayed_switch();
#endif /* FREEBSD_WIP */
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_bo_va *bo_va;
		int r;

		fpriv = malloc(sizeof(*fpriv), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		radeon_vm_init(rdev, &fpriv->vm);

		/* map the ib pool buffer read only into
		 * virtual address space */
		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
					 rdev->ring_tmp_bo.bo);
		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
					  RADEON_VM_PAGE_READABLE |
					  RADEON_VM_PAGE_SNOOPED);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			free(fpriv, DRM_MEM_DRIVER);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_bo_va *bo_va;
		int r;

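		/* Drop the IB pool mapping from this file's VM before tearing
		 * the VM down. */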
		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		if (!r) {
			bo_va = radeon_vm_bo_find(&fpriv->vm,
						  rdev->ring_tmp_bo.bo);
			if (bo_va)
				radeon_vm_bo_rmv(rdev, bo_va);
			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		}

		radeon_vm_fini(rdev, &fpriv->vm);
		free(fpriv, DRM_MEM_DRIVER);
		file_priv->driver_priv = NULL;
	}
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

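	/* The crtc_vblank_int flags and the programmed interrupt state are
	 * updated together under rdev->irq.lock. */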
	mtx_lock(&rdev->irq.lock);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	mtx_unlock(&rdev->irq.lock);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	mtx_lock(&rdev->irq.lock);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	mtx_unlock(&rdev->irq.lock);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: maximum acceptable timestamp error; updated with the actual error
 * @vblank_time: timeval that receives the vblank timestamp
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}

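/* Generate a stub handler that rejects a legacy (pre-KMS) ioctl. */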
#define KMS_INVALID_IOCTL(name)						\
static int								\
name(struct drm_device *dev, void *data, struct drm_file *file_priv)	\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}

/*
 * All these ioctls are invalid in kms world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
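/* Number of entries in the KMS ioctl table above. */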
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);