1/*	$NetBSD: radeon.h,v 1.12 2024/04/16 14:34:02 riastradh Exp $	*/
2
3/*
4 * Copyright 2008 Advanced Micro Devices, Inc.
5 * Copyright 2008 Red Hat Inc.
6 * Copyright 2009 Jerome Glisse.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors: Dave Airlie
27 *          Alex Deucher
28 *          Jerome Glisse
29 */
30#ifndef __RADEON_H__
31#define __RADEON_H__
32
/* TODO: Here are things that need to be done:
 *	- surface allocator & initializer: (a bit like the scratch regs) should
 *	  initialize the HDP_ stuff on RS600, R600, R700 hw, and anything else
 *	  related to surfaces
 *	- WB: write back stuff (handle it a bit like the scratch regs)
 *	- Vblank: look at Jesse's rework and what we should do
 *	- r600/r700: gart & cp
 *	- cs: clean up the cs ioctl, use bitmaps & things like that.
 *	- power management stuff
 *	- Barrier in gart code
 *	- Unmappable vram?
 *	- TESTING, TESTING, TESTING
 */
46
/* Initialization path:
 *  We expect that acceleration initialization might fail for various
 *  reasons even though we work hard to make it work on most
 *  configurations. In order to still have a working userspace in such
 *  a situation, the init path must succeed up to the memory controller
 *  initialization point. Failures before this point are considered
 *  fatal errors. Here is the init callchain:
 *      radeon_device_init  performs common structure and mutex initialization
 *      asic_init           sets up the GPU memory layout and performs all
 *                          one-time initialization (failures in this
 *                          function are considered fatal)
 *      asic_startup        sets up GPU acceleration; to follow the
 *                          guideline, the first thing this function
 *                          should do is set up the GPU memory
 *                          controller (only MC setup failures are
 *                          considered fatal)
 */
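
/*
 * Illustrative sketch only (not compiled, not part of the driver): the
 * error policy described above, using hypothetical helper names for the
 * per-ASIC hooks.
 */
#if 0
static int example_init_flow(struct radeon_device *rdev)
{
	int r;

	r = radeon_device_init_common(rdev);	/* structures, mutexes */
	if (r)
		return r;			/* always fatal */

	r = example_asic_init(rdev);		/* GPU memory layout, one-time init */
	if (r)
		return r;			/* always fatal */

	r = example_asic_startup(rdev);		/* acceleration, MC setup first */
	if (r) {
		/*
		 * Only an MC setup failure here is fatal; any later
		 * acceleration failure still leaves a working, but
		 * unaccelerated, userspace.
		 */
		return example_startup_failed_in_mc(rdev) ? r : 0;
	}
	return 0;
}
#endif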
64
65#include <linux/atomic.h>
66#include <linux/wait.h>
67#include <linux/list.h>
68#include <linux/kref.h>
69#include <linux/interval_tree.h>
70#include <linux/hashtable.h>
71#include <linux/dma-fence.h>
72#include <linux/acpi.h>
73
74#ifdef CONFIG_MMU_NOTIFIER
75#include <linux/mmu_notifier.h>
76#endif
77
78#include <drm/ttm/ttm_bo_api.h>
79#include <drm/ttm/ttm_bo_driver.h>
80#include <drm/ttm/ttm_placement.h>
81#include <drm/ttm/ttm_module.h>
82#include <drm/ttm/ttm_execbuf_util.h>
83
84#include <drm/drm_gem.h>
85
86#include "radeon_family.h"
87#include "radeon_mode.h"
88#include "radeon_reg.h"
89
/*
 * Module parameters.
 */
93extern int radeon_no_wb;
94extern int radeon_modeset;
95extern int radeon_dynclks;
96extern int radeon_r4xx_atom;
97extern int radeon_agpmode;
98extern int radeon_vram_limit;
99extern int radeon_gart_size;
100extern int radeon_benchmarking;
101extern int radeon_testing;
102extern int radeon_connector_table;
103extern int radeon_tv;
104extern int radeon_audio;
105extern int radeon_disp_priority;
106extern int radeon_hw_i2c;
107extern int radeon_pcie_gen2;
108extern int radeon_msi;
109extern int radeon_lockup_timeout;
110extern int radeon_fastfb;
111extern int radeon_dpm;
112extern int radeon_aspm;
113extern int radeon_runtime_pm;
114extern int radeon_hard_reset;
115extern int radeon_vm_size;
116extern int radeon_vm_block_size;
117extern int radeon_deep_color;
118extern int radeon_use_pflipirq;
119extern int radeon_bapm;
120extern int radeon_backlight;
121extern int radeon_auxch;
122extern int radeon_mst;
123extern int radeon_uvd;
124extern int radeon_vce;
125extern int radeon_si_support;
126extern int radeon_cik_support;
127
/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
132#define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
133#define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
134#define RADEON_USEC_IB_TEST_TIMEOUT		1000000 /* 1s */
135/* RADEON_IB_POOL_SIZE must be a power of 2 */
136#define RADEON_IB_POOL_SIZE			16
137#define RADEON_DEBUGFS_MAX_COMPONENTS		32
138#define RADEONFB_CONN_LIMIT			4
139#define RADEON_BIOS_NUM_SCRATCH			8
140
141/* internal ring indices */
142/* r1xx+ has gfx CP ring */
143#define RADEON_RING_TYPE_GFX_INDEX		0
144
145/* cayman has 2 compute CP rings */
146#define CAYMAN_RING_TYPE_CP1_INDEX		1
147#define CAYMAN_RING_TYPE_CP2_INDEX		2
148
149/* R600+ has an async dma ring */
150#define R600_RING_TYPE_DMA_INDEX		3
/* cayman adds a second async dma ring */
152#define CAYMAN_RING_TYPE_DMA1_INDEX		4
153
154/* R600+ */
155#define R600_RING_TYPE_UVD_INDEX		5
156
157/* TN+ */
158#define TN_RING_TYPE_VCE1_INDEX			6
159#define TN_RING_TYPE_VCE2_INDEX			7
160
161/* max number of rings */
162#define RADEON_NUM_RINGS			8
163
164/* number of hw syncs before falling back on blocking */
165#define RADEON_NUM_SYNCS			4
166
/* hardcode these limits for now */
168#define RADEON_VA_IB_OFFSET			(1 << 20)
169#define RADEON_VA_RESERVED_SIZE			(8 << 20)
170#define RADEON_IB_VM_MAX_SIZE			(64 << 10)
171
172/* hard reset data */
173#define RADEON_ASIC_RESET_DATA                  0x39d5e86b
174
175/* reset flags */
176#define RADEON_RESET_GFX			(1 << 0)
177#define RADEON_RESET_COMPUTE			(1 << 1)
178#define RADEON_RESET_DMA			(1 << 2)
179#define RADEON_RESET_CP				(1 << 3)
180#define RADEON_RESET_GRBM			(1 << 4)
181#define RADEON_RESET_DMA1			(1 << 5)
182#define RADEON_RESET_RLC			(1 << 6)
183#define RADEON_RESET_SEM			(1 << 7)
184#define RADEON_RESET_IH				(1 << 8)
185#define RADEON_RESET_VMC			(1 << 9)
186#define RADEON_RESET_MC				(1 << 10)
187#define RADEON_RESET_DISPLAY			(1 << 11)
188
189/* CG block flags */
190#define RADEON_CG_BLOCK_GFX			(1 << 0)
191#define RADEON_CG_BLOCK_MC			(1 << 1)
192#define RADEON_CG_BLOCK_SDMA			(1 << 2)
193#define RADEON_CG_BLOCK_UVD			(1 << 3)
194#define RADEON_CG_BLOCK_VCE			(1 << 4)
195#define RADEON_CG_BLOCK_HDP			(1 << 5)
196#define RADEON_CG_BLOCK_BIF			(1 << 6)
197
198/* CG flags */
199#define RADEON_CG_SUPPORT_GFX_MGCG		(1 << 0)
200#define RADEON_CG_SUPPORT_GFX_MGLS		(1 << 1)
201#define RADEON_CG_SUPPORT_GFX_CGCG		(1 << 2)
202#define RADEON_CG_SUPPORT_GFX_CGLS		(1 << 3)
203#define RADEON_CG_SUPPORT_GFX_CGTS		(1 << 4)
204#define RADEON_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
205#define RADEON_CG_SUPPORT_GFX_CP_LS		(1 << 6)
206#define RADEON_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
207#define RADEON_CG_SUPPORT_MC_LS			(1 << 8)
208#define RADEON_CG_SUPPORT_MC_MGCG		(1 << 9)
209#define RADEON_CG_SUPPORT_SDMA_LS		(1 << 10)
210#define RADEON_CG_SUPPORT_SDMA_MGCG		(1 << 11)
211#define RADEON_CG_SUPPORT_BIF_LS		(1 << 12)
212#define RADEON_CG_SUPPORT_UVD_MGCG		(1 << 13)
213#define RADEON_CG_SUPPORT_VCE_MGCG		(1 << 14)
214#define RADEON_CG_SUPPORT_HDP_LS		(1 << 15)
215#define RADEON_CG_SUPPORT_HDP_MGCG		(1 << 16)
216
217/* PG flags */
218#define RADEON_PG_SUPPORT_GFX_PG		(1 << 0)
219#define RADEON_PG_SUPPORT_GFX_SMG		(1 << 1)
220#define RADEON_PG_SUPPORT_GFX_DMG		(1 << 2)
221#define RADEON_PG_SUPPORT_UVD			(1 << 3)
222#define RADEON_PG_SUPPORT_VCE			(1 << 4)
223#define RADEON_PG_SUPPORT_CP			(1 << 5)
224#define RADEON_PG_SUPPORT_GDS			(1 << 6)
225#define RADEON_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
226#define RADEON_PG_SUPPORT_SDMA			(1 << 8)
227#define RADEON_PG_SUPPORT_ACP			(1 << 9)
228#define RADEON_PG_SUPPORT_SAMU			(1 << 10)
229
230/* max cursor sizes (in pixels) */
231#define CURSOR_WIDTH 64
232#define CURSOR_HEIGHT 64
233
234#define CIK_CURSOR_WIDTH 128
235#define CIK_CURSOR_HEIGHT 128
236
237/*
238 * Errata workarounds.
239 */
240enum radeon_pll_errata {
241	CHIP_ERRATA_R300_CG             = 0x00000001,
242	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
243	CHIP_ERRATA_PLL_DELAY           = 0x00000004
244};
245
246
247struct radeon_device;
248
249#ifdef __NetBSD__
250extern struct radeon_device *radeon_device_private(device_t);
251#endif
252
253/*
254 * BIOS.
255 */
256bool radeon_get_bios(struct radeon_device *rdev);
257
258/*
259 * Dummy page
260 */
261struct radeon_dummy_page {
262	uint64_t	entry;
263#ifdef __NetBSD__
264	bus_dma_segment_t	rdp_seg;
265	bus_dmamap_t		rdp_map;
266	void		*rdp_addr;
267#else
268	struct page	*page;
269#endif
270	dma_addr_t	addr;
271};
272int radeon_dummy_page_init(struct radeon_device *rdev);
273void radeon_dummy_page_fini(struct radeon_device *rdev);
274
275
276/*
277 * Clocks
278 */
279struct radeon_clock {
280	struct radeon_pll p1pll;
281	struct radeon_pll p2pll;
282	struct radeon_pll dcpll;
283	struct radeon_pll spll;
284	struct radeon_pll mpll;
	/* 10 kHz units */
286	uint32_t default_mclk;
287	uint32_t default_sclk;
288	uint32_t default_dispclk;
289	uint32_t current_dispclk;
290	uint32_t dp_extclk;
291	uint32_t max_pixel_clock;
292	uint32_t vco_freq;
293};
294
295/*
296 * Power management
297 */
298int radeon_pm_init(struct radeon_device *rdev);
299int radeon_pm_late_init(struct radeon_device *rdev);
300void radeon_pm_fini(struct radeon_device *rdev);
301void radeon_pm_compute_clocks(struct radeon_device *rdev);
302void radeon_pm_suspend(struct radeon_device *rdev);
303void radeon_pm_resume(struct radeon_device *rdev);
304void radeon_combios_get_power_modes(struct radeon_device *rdev);
305void radeon_atombios_get_power_modes(struct radeon_device *rdev);
306int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
307				   u8 clock_type,
308				   u32 clock,
309				   bool strobe_mode,
310				   struct atom_clock_dividers *dividers);
311int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
312					u32 clock,
313					bool strobe_mode,
314					struct atom_mpll_param *mpll_param);
315void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
316int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
317					  u16 voltage_level, u8 voltage_type,
318					  u32 *gpio_value, u32 *gpio_mask);
319void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
320					 u32 eng_clock, u32 mem_clock);
321int radeon_atom_get_voltage_step(struct radeon_device *rdev,
322				 u8 voltage_type, u16 *voltage_step);
323int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
324			     u16 voltage_id, u16 *voltage);
325int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
326						      u16 *voltage,
327						      u16 leakage_idx);
328int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
329					  u16 *leakage_id);
330int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
331							 u16 *vddc, u16 *vddci,
332							 u16 virtual_voltage_id,
333							 u16 vbios_voltage_id);
334int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
335				u16 virtual_voltage_id,
336				u16 *voltage);
337int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
338				      u8 voltage_type,
339				      u16 nominal_voltage,
340				      u16 *true_voltage);
341int radeon_atom_get_min_voltage(struct radeon_device *rdev,
342				u8 voltage_type, u16 *min_voltage);
343int radeon_atom_get_max_voltage(struct radeon_device *rdev,
344				u8 voltage_type, u16 *max_voltage);
345int radeon_atom_get_voltage_table(struct radeon_device *rdev,
346				  u8 voltage_type, u8 voltage_mode,
347				  struct atom_voltage_table *voltage_table);
348bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
349				 u8 voltage_type, u8 voltage_mode);
350int radeon_atom_get_svi2_info(struct radeon_device *rdev,
351			      u8 voltage_type,
352			      u8 *svd_gpio_id, u8 *svc_gpio_id);
353void radeon_atom_update_memory_dll(struct radeon_device *rdev,
354				   u32 mem_clock);
355void radeon_atom_set_ac_timing(struct radeon_device *rdev,
356			       u32 mem_clock);
357int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
358				  u8 module_index,
359				  struct atom_mc_reg_table *reg_table);
360int radeon_atom_get_memory_info(struct radeon_device *rdev,
361				u8 module_index, struct atom_memory_info *mem_info);
362int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
363				     bool gddr5, u8 module_index,
364				     struct atom_memory_clock_range_table *mclk_range_table);
365int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
366			     u16 voltage_id, u16 *voltage);
367void rs690_pm_info(struct radeon_device *rdev);
368extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
369				    unsigned *bankh, unsigned *mtaspect,
370				    unsigned *tile_split);
371
372/*
373 * Fences.
374 */
375struct radeon_fence_driver {
376	struct radeon_device		*rdev;
377	uint32_t			scratch_reg;
378	uint64_t			gpu_addr;
379	volatile uint32_t		*cpu_addr;
380	/* sync_seq is protected by ring emission lock */
381	uint64_t			sync_seq[RADEON_NUM_RINGS];
382	atomic64_t			last_seq;
383	bool				initialized, delayed_irq;
384	struct delayed_work		lockup_work;
385};
386
387struct radeon_fence {
388	struct dma_fence		base;
389
390	struct radeon_device	*rdev;
391	uint64_t		seq;
392	/* RB, DMA, etc. */
393	unsigned		ring;
394	bool			is_vm_update;
395
396	TAILQ_ENTRY(radeon_fence)	fence_check;
397};
398
399int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
400int radeon_fence_driver_init(struct radeon_device *rdev);
401void radeon_fence_driver_fini(struct radeon_device *rdev);
402void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
403int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
404void radeon_fence_wakeup_locked(struct radeon_device *rdev);
405void radeon_fence_process(struct radeon_device *rdev, int ring);
406bool radeon_fence_signaled(struct radeon_fence *fence);
407long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout);
408int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
409int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
410int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
411int radeon_fence_wait_any(struct radeon_device *rdev,
412			  struct radeon_fence **fences,
413			  bool intr);
414struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
415void radeon_fence_unref(struct radeon_fence **fence);
416unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
417bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
418void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
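
/* Return the later (higher sequence number) of two fences on the same
 * ring; either argument may be NULL. */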
419static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
420						      struct radeon_fence *b)
421{
422	if (!a) {
423		return b;
424	}
425
426	if (!b) {
427		return a;
428	}
429
430	BUG_ON(a->ring != b->ring);
431
432	if (a->seq > b->seq) {
433		return a;
434	} else {
435		return b;
436	}
437}
438
439static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
440					   struct radeon_fence *b)
441{
442	if (!a) {
443		return false;
444	}
445
446	if (!b) {
447		return true;
448	}
449
450	BUG_ON(a->ring != b->ring);
451
452	return a->seq < b->seq;
453}
454
455/*
456 * Tiling registers
457 */
458struct radeon_surface_reg {
459	struct radeon_bo *bo;
460};
461
462#define RADEON_GEM_MAX_SURFACES 8
463
464/*
465 * TTM.
466 */
467struct radeon_mman {
468	struct ttm_bo_device		bdev;
469	bool				initialized;
470
471#if defined(CONFIG_DEBUG_FS)
472	struct dentry			*vram;
473	struct dentry			*gtt;
474#endif
475};
476
477struct radeon_bo_list {
478	struct radeon_bo		*robj;
479	struct ttm_validate_buffer	tv;
480	uint64_t			gpu_offset;
481	unsigned			preferred_domains;
482	unsigned			allowed_domains;
483	uint32_t			tiling_flags;
484};
485
486/* bo virtual address in a specific vm */
487struct radeon_bo_va {
488	/* protected by bo being reserved */
489	struct list_head		bo_list;
490	uint32_t			flags;
491	struct radeon_fence		*last_pt_update;
492	unsigned			ref_count;
493
494	/* protected by vm mutex */
495	struct interval_tree_node	it;
496	struct list_head		vm_status;
497
498	/* constant after initialization */
499	struct radeon_vm		*vm;
500	struct radeon_bo		*bo;
501};
502
503struct radeon_bo {
504	/* Protected by gem.mutex */
505	struct list_head		list;
506	/* Protected by tbo.reserved */
507	u32				initial_domain;
508	struct ttm_place		placements[4];
509	struct ttm_placement		placement;
510	struct ttm_buffer_object	tbo;
511	struct ttm_bo_kmap_obj		kmap;
512	u32				flags;
513	unsigned			pin_count;
514	void				*kptr;
515	u32				tiling_flags;
516	u32				pitch;
517	int				surface_reg;
518	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
522	struct list_head		va;
523	/* Constant after initialization */
524	struct radeon_device		*rdev;
525
526	struct ttm_bo_kmap_obj		dma_buf_vmap;
527#ifndef __NetBSD__		/* XXX pid???  */
528	pid_t				pid;
529#endif
530
531#ifdef CONFIG_MMU_NOTIFIER
532	struct mmu_interval_notifier	notifier;
533#endif
534};
535#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
536
537int radeon_gem_debugfs_init(struct radeon_device *rdev);
538
/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffers or semaphores, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry has
 * the highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there (see the
 * illustrative sketch below).
 *
 * When there is not enough room at the end, we start waiting for
 * each sub-object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
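
/*
 * Illustrative sketch only (not compiled, not part of the driver): the
 * "room at the end" test described above, written out with plain offsets.
 */
#if 0
static bool example_sa_fits_at_end(unsigned total_size,
				   unsigned last_object_offset,
				   unsigned last_object_size,
				   unsigned alloc_size)
{
	/* Free space past the last allocation must cover the request. */
	return total_size - (last_object_offset + last_object_size) >=
	       alloc_size;
}
#endif
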
562struct radeon_sa_manager {
563#ifdef __NetBSD__
564	spinlock_t		wq_lock;
565	drm_waitqueue_t		wq;
566#else
567	wait_queue_head_t	wq;
568#endif
569	struct radeon_bo	*bo;
570	struct list_head	*hole;
571	struct list_head	flist[RADEON_NUM_RINGS];
572	struct list_head	olist;
573	unsigned		size;
574	uint64_t		gpu_addr;
575	void			*cpu_ptr;
576	uint32_t		domain;
577	uint32_t		align;
578};
579
580struct radeon_sa_bo;
581
582/* sub-allocation buffer */
583struct radeon_sa_bo {
584	struct list_head		olist;
585	struct list_head		flist;
586	struct radeon_sa_manager	*manager;
587	unsigned			soffset;
588	unsigned			eoffset;
589	struct radeon_fence		*fence;
590};
591
592/*
593 * GEM objects.
594 */
595struct radeon_gem {
596	struct mutex		mutex;
597	struct list_head	objects;
598};
599
600int radeon_gem_init(struct radeon_device *rdev);
601void radeon_gem_fini(struct radeon_device *rdev);
602int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
603				int alignment, int initial_domain,
604				u32 flags, bool kernel,
605				struct drm_gem_object **obj);
606
607int radeon_mode_dumb_create(struct drm_file *file_priv,
608			    struct drm_device *dev,
609			    struct drm_mode_create_dumb *args);
610int radeon_mode_dumb_mmap(struct drm_file *filp,
611			  struct drm_device *dev,
612			  uint32_t handle, uint64_t *offset_p);
613
614/*
615 * Semaphores.
616 */
617struct radeon_semaphore {
618	struct radeon_sa_bo	*sa_bo;
619	signed			waiters;
620	uint64_t		gpu_addr;
621};
622
623int radeon_semaphore_create(struct radeon_device *rdev,
624			    struct radeon_semaphore **semaphore);
625bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
626				  struct radeon_semaphore *semaphore);
627bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
628				struct radeon_semaphore *semaphore);
629void radeon_semaphore_free(struct radeon_device *rdev,
630			   struct radeon_semaphore **semaphore,
631			   struct radeon_fence *fence);
632
633/*
634 * Synchronization
635 */
636struct radeon_sync {
637	struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
638	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
639	struct radeon_fence	*last_vm_update;
640};
641
642void radeon_sync_create(struct radeon_sync *sync);
643void radeon_sync_fence(struct radeon_sync *sync,
644		       struct radeon_fence *fence);
645int radeon_sync_resv(struct radeon_device *rdev,
646		     struct radeon_sync *sync,
647		     struct dma_resv *resv,
648		     bool shared);
649int radeon_sync_rings(struct radeon_device *rdev,
650		      struct radeon_sync *sync,
651		      int waiting_ring);
652void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
653		      struct radeon_fence *fence);
654
655/*
656 * GART structures, functions & helpers
657 */
658struct radeon_mc;
659
660#define RADEON_GPU_PAGE_SIZE 4096
661#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
662#define RADEON_GPU_PAGE_SHIFT 12
663#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
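/* e.g. RADEON_GPU_PAGE_ALIGN(4097) == 8192: round up to the next GPU page boundary */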
664
665#define RADEON_GART_PAGE_DUMMY  0
666#define RADEON_GART_PAGE_VALID	(1 << 0)
667#define RADEON_GART_PAGE_READ	(1 << 1)
668#define RADEON_GART_PAGE_WRITE	(1 << 2)
669#define RADEON_GART_PAGE_SNOOP	(1 << 3)
670
671struct radeon_gart {
672#ifdef __NetBSD__
673	bus_dma_segment_t		rg_table_seg;
674	bus_dmamap_t			rg_table_map;
675#endif
676	dma_addr_t			table_addr;
677	struct radeon_bo		*robj;
678	void				*ptr;
679	unsigned			num_gpu_pages;
680	unsigned			num_cpu_pages;
681	unsigned			table_size;
682	struct page			**pages;
683	uint64_t			*pages_entry;
684	bool				ready;
685};
686
687int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
688void radeon_gart_table_ram_free(struct radeon_device *rdev);
689int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
690void radeon_gart_table_vram_free(struct radeon_device *rdev);
691int radeon_gart_table_vram_pin(struct radeon_device *rdev);
692void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
693int radeon_gart_init(struct radeon_device *rdev);
694void radeon_gart_fini(struct radeon_device *rdev);
695#ifdef __NetBSD__
696void radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
697			unsigned npages);
698int radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
699		     unsigned npages, struct page **pages,
700		     bus_dmamap_t dmamap, uint32_t flags);
701#else
702void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
703			int pages);
704int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
705		     int pages, struct page **pagelist,
706		     dma_addr_t *dma_addr, uint32_t flags);
707#endif
708
709
710/*
711 * GPU MC structures, functions & helpers
712 */
713struct radeon_mc {
714	resource_size_t		aper_size;
715	resource_size_t		aper_base;
716	resource_size_t		agp_base;
717	/* for some chips with <= 32MB we need to lie
718	 * about vram size near mc fb location */
719	u64			mc_vram_size;
720	u64			visible_vram_size;
721	u64			gtt_size;
722	u64			gtt_start;
723	u64			gtt_end;
724	u64			vram_start;
725	u64			vram_end;
726	unsigned		vram_width;
727	u64			real_vram_size;
728	int			vram_mtrr;
729	bool			vram_is_ddr;
730	bool			igp_sideport_enabled;
731	u64                     gtt_base_align;
732	u64                     mc_mask;
733};
734
735bool radeon_combios_sideport_present(struct radeon_device *rdev);
736bool radeon_atombios_sideport_present(struct radeon_device *rdev);
737
738/*
739 * GPU scratch registers structures, functions & helpers
740 */
741struct radeon_scratch {
742	unsigned		num_reg;
743	uint32_t                reg_base;
744	bool			free[32];
745	uint32_t		reg[32];
746};
747
748int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
749void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
750
751/*
752 * GPU doorbell structures, functions & helpers
753 */
754#define RADEON_MAX_DOORBELLS 1024	/* Reserve at most 1024 doorbell slots for radeon-owned rings. */
755
756struct radeon_doorbell {
757	/* doorbell mmio */
758	resource_size_t		base;
759	resource_size_t		size;
760#ifdef __NetBSD__
761	bus_space_tag_t		bst;
762	bus_space_handle_t	bsh;
763#else
764	u32 __iomem		*ptr;
765#endif
766	u32			num_doorbells;	/* Number of doorbells actually reserved for radeon. */
767	DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS);
768};
769
770int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
771void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
772
773/*
774 * IRQS.
775 */
776
777struct radeon_flip_work {
778	struct work_struct		flip_work;
779	struct work_struct		unpin_work;
780	struct radeon_device		*rdev;
781	int				crtc_id;
782	u32				target_vblank;
783	uint64_t			base;
784	struct drm_pending_vblank_event *event;
785	struct radeon_bo		*old_rbo;
786	struct dma_fence		*fence;
787	bool				async;
788};
789
790struct r500_irq_stat_regs {
791	u32 disp_int;
792	u32 hdmi0_status;
793};
794
795struct r600_irq_stat_regs {
796	u32 disp_int;
797	u32 disp_int_cont;
798	u32 disp_int_cont2;
799	u32 d1grph_int;
800	u32 d2grph_int;
801	u32 hdmi0_status;
802	u32 hdmi1_status;
803};
804
805struct evergreen_irq_stat_regs {
806	u32 disp_int[6];
807	u32 grph_int[6];
808	u32 afmt_status[6];
809};
810
811struct cik_irq_stat_regs {
812	u32 disp_int;
813	u32 disp_int_cont;
814	u32 disp_int_cont2;
815	u32 disp_int_cont3;
816	u32 disp_int_cont4;
817	u32 disp_int_cont5;
818	u32 disp_int_cont6;
819	u32 d1grph_int;
820	u32 d2grph_int;
821	u32 d3grph_int;
822	u32 d4grph_int;
823	u32 d5grph_int;
824	u32 d6grph_int;
825};
826
827union radeon_irq_stat_regs {
828	struct r500_irq_stat_regs r500;
829	struct r600_irq_stat_regs r600;
830	struct evergreen_irq_stat_regs evergreen;
831	struct cik_irq_stat_regs cik;
832};
833
834struct radeon_irq {
835	bool				installed;
836	spinlock_t			lock;
837	atomic_t			ring_int[RADEON_NUM_RINGS];
838	bool				crtc_vblank_int[RADEON_MAX_CRTCS];
839	atomic_t			pflip[RADEON_MAX_CRTCS];
840#ifdef __NetBSD__
841	spinlock_t			vblank_lock;
842	drm_waitqueue_t			vblank_queue;
843#else
844	wait_queue_head_t		vblank_queue;
845#endif
846	bool				hpd[RADEON_MAX_HPD_PINS];
847	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
848	union radeon_irq_stat_regs	stat_regs;
849	bool				dpm_thermal;
850};
851
852int radeon_irq_kms_init(struct radeon_device *rdev);
853void radeon_irq_kms_fini(struct radeon_device *rdev);
854void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
855bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
856void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
857void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
858void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
859void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
860void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
861void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
862void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
863
864/*
865 * CP & rings.
866 */
867
868struct radeon_ib {
869	struct radeon_sa_bo		*sa_bo;
870	uint32_t			length_dw;
871	uint64_t			gpu_addr;
872	uint32_t			*ptr;
873	int				ring;
874	struct radeon_fence		*fence;
875	struct radeon_vm		*vm;
876	bool				is_const_ib;
877	struct radeon_sync		sync;
878};
879
880struct radeon_ring {
881	struct radeon_bo	*ring_obj;
882	volatile uint32_t	*ring;
883	unsigned		rptr_offs;
884	unsigned		rptr_save_reg;
885	u64			next_rptr_gpu_addr;
886	volatile u32		*next_rptr_cpu_addr;
887	unsigned		wptr;
888	unsigned		wptr_old;
889	unsigned		ring_size;
890	unsigned		ring_free_dw;
891	int			count_dw;
892	atomic_t		last_rptr;
893	atomic64_t		last_activity;
894	uint64_t		gpu_addr;
895	uint32_t		align_mask;
896	uint32_t		ptr_mask;
897	bool			ready;
898	u32			nop;
899	u32			idx;
900	u64			last_semaphore_signal_addr;
901	u64			last_semaphore_wait_addr;
902	/* for CIK queues */
903	u32 me;
904	u32 pipe;
905	u32 queue;
906	struct radeon_bo	*mqd_obj;
907	u32 doorbell_index;
908	unsigned		wptr_offs;
909};
910
911struct radeon_mec {
912	struct radeon_bo	*hpd_eop_obj;
913	u64			hpd_eop_gpu_addr;
914	u32 num_pipe;
915	u32 num_mec;
916	u32 num_queue;
917};
918
919/*
920 * VM
921 */
922
923/* maximum number of VMIDs */
924#define RADEON_NUM_VM	16
925
926/* number of entries in page table */
927#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
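/* e.g. a block size of 9 gives 1 << 9 == 512 entries per page table */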
928
929/* PTBs (Page Table Blocks) need to be aligned to 32K */
930#define RADEON_VM_PTB_ALIGN_SIZE   32768
931#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
932#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
933
934#define R600_PTE_VALID		(1 << 0)
935#define R600_PTE_SYSTEM		(1 << 1)
936#define R600_PTE_SNOOPED	(1 << 2)
937#define R600_PTE_READABLE	(1 << 5)
938#define R600_PTE_WRITEABLE	(1 << 6)
939
940/* PTE (Page Table Entry) fragment field for different page sizes */
941#define R600_PTE_FRAG_4KB	(0 << 7)
942#define R600_PTE_FRAG_64KB	(4 << 7)
943#define R600_PTE_FRAG_256KB	(6 << 7)
944
/* flags that need to be set so we can copy directly from the GART table */
946#define R600_PTE_GART_MASK	( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
947				  R600_PTE_SYSTEM | R600_PTE_VALID )
948
949struct radeon_vm_pt {
950	struct radeon_bo		*bo;
951	uint64_t			addr;
952};
953
954struct radeon_vm_id {
955	unsigned		id;
956	uint64_t		pd_gpu_addr;
957	/* last flushed PD/PT update */
958	struct radeon_fence	*flushed_updates;
959	/* last use of vmid */
960	struct radeon_fence	*last_id_use;
961};
962
963struct radeon_vm {
964	struct mutex		mutex;
965
966	struct rb_root_cached	va;
967
968	/* protecting invalidated and freed */
969	spinlock_t		status_lock;
970
971	/* BOs moved, but not yet updated in the PT */
972	struct list_head	invalidated;
973
974	/* BOs freed, but not yet updated in the PT */
975	struct list_head	freed;
976
977	/* BOs cleared in the PT */
978	struct list_head	cleared;
979
980	/* contains the page directory */
981	struct radeon_bo	*page_directory;
982	unsigned		max_pde_used;
983
984	/* array of page tables, one for each page directory entry */
985	struct radeon_vm_pt	*page_tables;
986
987	struct radeon_bo_va	*ib_bo_va;
988
989	/* for id and flush management per ring */
990	struct radeon_vm_id	ids[RADEON_NUM_RINGS];
991};
992
993struct radeon_vm_manager {
994	struct radeon_fence		*active[RADEON_NUM_VM];
995	uint32_t			max_pfn;
996	/* number of VMIDs */
997	unsigned			nvm;
998	/* vram base address for page table entry  */
999	u64				vram_base_offset;
1000	/* is vm enabled? */
1001	bool				enabled;
1002	/* for hw to save the PD addr on suspend/resume */
1003	uint32_t			saved_table_addr[RADEON_NUM_VM];
1004};
1005
1006/*
1007 * file private structure
1008 */
1009struct radeon_fpriv {
1010	struct radeon_vm		vm;
1011};
1012
1013/*
1014 * R6xx+ IH ring
1015 */
1016struct r600_ih {
1017	struct radeon_bo	*ring_obj;
1018	volatile uint32_t	*ring;
1019	unsigned		rptr;
1020	unsigned		ring_size;
1021	uint64_t		gpu_addr;
1022	uint32_t		ptr_mask;
1023	atomic_t		lock;
1024	bool                    enabled;
1025};
1026
1027/*
1028 * RLC stuff
1029 */
1030#include "clearstate_defs.h"
1031
1032struct radeon_rlc {
1033	/* for power gating */
1034	struct radeon_bo	*save_restore_obj;
1035	uint64_t		save_restore_gpu_addr;
1036	volatile uint32_t	*sr_ptr;
1037	const u32               *reg_list;
1038	u32                     reg_list_size;
1039	/* for clear state */
1040	struct radeon_bo	*clear_state_obj;
1041	uint64_t		clear_state_gpu_addr;
1042	volatile uint32_t	*cs_ptr;
1043	const struct cs_section_def   *cs_data;
1044	u32                     clear_state_size;
1045	/* for cp tables */
1046	struct radeon_bo	*cp_table_obj;
1047	uint64_t		cp_table_gpu_addr;
1048	volatile uint32_t	*cp_table_ptr;
1049	u32                     cp_table_size;
1050};
1051
1052int radeon_ib_get(struct radeon_device *rdev, int ring,
1053		  struct radeon_ib *ib, struct radeon_vm *vm,
1054		  unsigned size);
1055void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
1056int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
1057		       struct radeon_ib *const_ib, bool hdp_flush);
1058int radeon_ib_pool_init(struct radeon_device *rdev);
1059void radeon_ib_pool_fini(struct radeon_device *rdev);
1060int radeon_ib_ring_tests(struct radeon_device *rdev);
1061/* Ring access between begin & end cannot sleep */
1062bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
1063				      struct radeon_ring *ring);
1064void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
1065int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
1066int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
1067void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
1068			bool hdp_flush);
1069void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
1070			       bool hdp_flush);
1071void radeon_ring_undo(struct radeon_ring *ring);
1072void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
1073int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
1074void radeon_ring_lockup_update(struct radeon_device *rdev,
1075			       struct radeon_ring *ring);
1076bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
1077unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
1078			    uint32_t **data);
1079int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
1080			unsigned size, uint32_t *data);
1081int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
1082		     unsigned rptr_offs, u32 nop);
1083void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
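
/*
 * Illustrative sketch only (not compiled, not part of the driver): the
 * usual lock/write/commit pattern for the ring helpers declared above;
 * radeon_ring_write() is assumed to be the dword-emit helper declared
 * elsewhere in this header.
 */
#if 0
static int example_emit_nop(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 1);		/* reserve 1 dword */
	if (r)
		return r;
	radeon_ring_write(ring, ring->nop);		/* emit a NOP packet */
	radeon_ring_unlock_commit(rdev, ring, false);	/* commit, no HDP flush */
	return 0;
}
#endif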
1084
1085
1086/* r600 async dma */
1087void r600_dma_stop(struct radeon_device *rdev);
1088int r600_dma_resume(struct radeon_device *rdev);
1089void r600_dma_fini(struct radeon_device *rdev);
1090
1091void cayman_dma_stop(struct radeon_device *rdev);
1092int cayman_dma_resume(struct radeon_device *rdev);
1093void cayman_dma_fini(struct radeon_device *rdev);
1094
1095/*
1096 * CS.
1097 */
1098struct radeon_cs_chunk {
1099	uint32_t		length_dw;
1100	uint32_t		*kdata;
1101	void __user		*user_ptr;
1102};
1103
1104struct radeon_cs_parser {
1105	struct device		*dev;
1106	struct radeon_device	*rdev;
1107	struct drm_file		*filp;
1108	/* chunks */
1109	unsigned		nchunks;
1110	struct radeon_cs_chunk	*chunks;
1111	uint64_t		*chunks_array;
1112	/* IB */
1113	unsigned		idx;
1114	/* relocations */
1115	unsigned		nrelocs;
1116	struct radeon_bo_list	*relocs;
1117	struct radeon_bo_list	*vm_bos;
1118	struct list_head	validated;
1119	unsigned		dma_reloc_idx;
1120	/* indices of various chunks */
1121	struct radeon_cs_chunk  *chunk_ib;
1122	struct radeon_cs_chunk  *chunk_relocs;
1123	struct radeon_cs_chunk  *chunk_flags;
1124	struct radeon_cs_chunk  *chunk_const_ib;
1125	struct radeon_ib	ib;
1126	struct radeon_ib	const_ib;
1127	void			*track;
1128	unsigned		family;
1129	int			parser_error;
1130	u32			cs_flags;
1131	u32			ring;
1132	s32			priority;
1133	struct ww_acquire_ctx	ticket;
1134};
1135
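/* Return dword @idx of the IB being parsed, reading from the kernel copy
 * of the chunk when one exists and from the mapped IB otherwise. */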
1136static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
1137{
1138	struct radeon_cs_chunk *ibc = p->chunk_ib;
1139
1140	if (ibc->kdata)
1141		return ibc->kdata[idx];
1142	return p->ib.ptr[idx];
1143}
1144
1145
1146struct radeon_cs_packet {
1147	unsigned	idx;
1148	unsigned	type;
1149	unsigned	reg;
1150	unsigned	opcode;
1151	int		count;
1152	unsigned	one_reg_wr;
1153};
1154
1155typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
1156				      struct radeon_cs_packet *pkt,
1157				      unsigned idx, unsigned reg);
1158typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
1159				      struct radeon_cs_packet *pkt);
1160
1161
1162/*
1163 * AGP
1164 */
1165int radeon_agp_init(struct radeon_device *rdev);
1166void radeon_agp_resume(struct radeon_device *rdev);
1167void radeon_agp_suspend(struct radeon_device *rdev);
1168void radeon_agp_fini(struct radeon_device *rdev);
1169
1170
1171/*
1172 * Writeback
1173 */
1174struct radeon_wb {
1175	struct radeon_bo	*wb_obj;
1176	volatile uint32_t	*wb;
1177	uint64_t		gpu_addr;
1178	bool                    enabled;
1179	bool                    use_event;
1180};
1181
1182#define RADEON_WB_SCRATCH_OFFSET 0
1183#define RADEON_WB_RING0_NEXT_RPTR 256
1184#define RADEON_WB_CP_RPTR_OFFSET 1024
1185#define RADEON_WB_CP1_RPTR_OFFSET 1280
1186#define RADEON_WB_CP2_RPTR_OFFSET 1536
1187#define R600_WB_DMA_RPTR_OFFSET   1792
1188#define R600_WB_IH_WPTR_OFFSET   2048
1189#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
1190#define R600_WB_EVENT_OFFSET     3072
1191#define CIK_WB_CP1_WPTR_OFFSET     3328
1192#define CIK_WB_CP2_WPTR_OFFSET     3584
1193#define R600_WB_DMA_RING_TEST_OFFSET 3588
1194#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
1195
/**
 * struct radeon_pm - power management data
 * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk:  sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk:    system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk:    ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk:               GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth:   current bandwidth needs
 *
 * It keeps track of various data needed to make power management decisions.
 * Bandwidth needs are used to determine the minimum clocks of the GPU and
 * memory. The relation between gpu/memory clock and available bandwidth is
 * hw dependent (type of memory, bus size, efficiency, ...).
 */
1215
1216enum radeon_pm_method {
1217	PM_METHOD_PROFILE,
1218	PM_METHOD_DYNPM,
1219	PM_METHOD_DPM,
1220};
1221
1222enum radeon_dynpm_state {
1223	DYNPM_STATE_DISABLED,
1224	DYNPM_STATE_MINIMUM,
1225	DYNPM_STATE_PAUSED,
1226	DYNPM_STATE_ACTIVE,
1227	DYNPM_STATE_SUSPENDED,
1228};
1229enum radeon_dynpm_action {
1230	DYNPM_ACTION_NONE,
1231	DYNPM_ACTION_MINIMUM,
1232	DYNPM_ACTION_DOWNCLOCK,
1233	DYNPM_ACTION_UPCLOCK,
1234	DYNPM_ACTION_DEFAULT
1235};
1236
1237enum radeon_voltage_type {
1238	VOLTAGE_NONE = 0,
1239	VOLTAGE_GPIO,
1240	VOLTAGE_VDDC,
1241	VOLTAGE_SW
1242};
1243
1244enum radeon_pm_state_type {
1245	/* not used for dpm */
1246	POWER_STATE_TYPE_DEFAULT,
1247	POWER_STATE_TYPE_POWERSAVE,
1248	/* user selectable states */
1249	POWER_STATE_TYPE_BATTERY,
1250	POWER_STATE_TYPE_BALANCED,
1251	POWER_STATE_TYPE_PERFORMANCE,
1252	/* internal states */
1253	POWER_STATE_TYPE_INTERNAL_UVD,
1254	POWER_STATE_TYPE_INTERNAL_UVD_SD,
1255	POWER_STATE_TYPE_INTERNAL_UVD_HD,
1256	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1257	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1258	POWER_STATE_TYPE_INTERNAL_BOOT,
1259	POWER_STATE_TYPE_INTERNAL_THERMAL,
1260	POWER_STATE_TYPE_INTERNAL_ACPI,
1261	POWER_STATE_TYPE_INTERNAL_ULV,
1262	POWER_STATE_TYPE_INTERNAL_3DPERF,
1263};
1264
1265enum radeon_pm_profile_type {
1266	PM_PROFILE_DEFAULT,
1267	PM_PROFILE_AUTO,
1268	PM_PROFILE_LOW,
1269	PM_PROFILE_MID,
1270	PM_PROFILE_HIGH,
1271};
1272
1273#define PM_PROFILE_DEFAULT_IDX 0
1274#define PM_PROFILE_LOW_SH_IDX  1
1275#define PM_PROFILE_MID_SH_IDX  2
1276#define PM_PROFILE_HIGH_SH_IDX 3
1277#define PM_PROFILE_LOW_MH_IDX  4
1278#define PM_PROFILE_MID_MH_IDX  5
1279#define PM_PROFILE_HIGH_MH_IDX 6
1280#define PM_PROFILE_MAX         7
1281
1282struct radeon_pm_profile {
1283	int dpms_off_ps_idx;
1284	int dpms_on_ps_idx;
1285	int dpms_off_cm_idx;
1286	int dpms_on_cm_idx;
1287};
1288
1289enum radeon_int_thermal_type {
1290	THERMAL_TYPE_NONE,
1291	THERMAL_TYPE_EXTERNAL,
1292	THERMAL_TYPE_EXTERNAL_GPIO,
1293	THERMAL_TYPE_RV6XX,
1294	THERMAL_TYPE_RV770,
1295	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1296	THERMAL_TYPE_EVERGREEN,
1297	THERMAL_TYPE_SUMO,
1298	THERMAL_TYPE_NI,
1299	THERMAL_TYPE_SI,
1300	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1301	THERMAL_TYPE_CI,
1302	THERMAL_TYPE_KV,
1303};
1304
1305struct radeon_voltage {
1306	enum radeon_voltage_type type;
1307	/* gpio voltage */
1308	struct radeon_gpio_rec gpio;
1309	u32 delay; /* delay in usec from voltage drop to sclk change */
1310	bool active_high; /* voltage drop is active when bit is high */
1311	/* VDDC voltage */
1312	u8 vddc_id; /* index into vddc voltage table */
1313	u8 vddci_id; /* index into vddci voltage table */
1314	bool vddci_enabled;
1315	/* r6xx+ sw */
1316	u16 voltage;
1317	/* evergreen+ vddci */
1318	u16 vddci;
1319};
1320
1321/* clock mode flags */
1322#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
1323
1324struct radeon_pm_clock_info {
1325	/* memory clock */
1326	u32 mclk;
1327	/* engine clock */
1328	u32 sclk;
1329	/* voltage info */
1330	struct radeon_voltage voltage;
1331	/* standardized clock flags */
1332	u32 flags;
1333};
1334
1335/* state flags */
1336#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
1337
1338struct radeon_power_state {
1339	enum radeon_pm_state_type type;
1340	struct radeon_pm_clock_info *clock_info;
1341	/* number of valid clock modes in this power state */
1342	int num_clock_modes;
1343	struct radeon_pm_clock_info *default_clock_mode;
1344	/* standardized state flags */
1345	u32 flags;
1346	u32 misc; /* vbios specific flags */
1347	u32 misc2; /* vbios specific flags */
1348	int pcie_lanes; /* pcie lanes */
1349};
1350
/*
 * Some modes are overclocked by a very small value; accept them
 */
1354#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
1355
1356enum radeon_dpm_auto_throttle_src {
1357	RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
1358	RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1359};
1360
1361enum radeon_dpm_event_src {
1362	RADEON_DPM_EVENT_SRC_ANALOG = 0,
1363	RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
1364	RADEON_DPM_EVENT_SRC_DIGITAL = 2,
1365	RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1366	RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1367};
1368
1369#define RADEON_MAX_VCE_LEVELS 6
1370
1371enum radeon_vce_level {
1372	RADEON_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
1373	RADEON_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
1374	RADEON_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
1375	RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
1376	RADEON_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
1377	RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
1378};
1379
1380struct radeon_ps {
1381	u32 caps; /* vbios flags */
1382	u32 class; /* vbios flags */
1383	u32 class2; /* vbios flags */
1384	/* UVD clocks */
1385	u32 vclk;
1386	u32 dclk;
1387	/* VCE clocks */
1388	u32 evclk;
1389	u32 ecclk;
1390	bool vce_active;
1391	enum radeon_vce_level vce_level;
1392	/* asic priv */
1393	void *ps_priv;
1394};
1395
1396struct radeon_dpm_thermal {
1397	/* thermal interrupt work */
1398	struct work_struct work;
1399	/* low temperature threshold */
1400	int                min_temp;
1401	/* high temperature threshold */
1402	int                max_temp;
1403	/* was interrupt low to high or high to low */
1404	bool               high_to_low;
1405};
1406
1407enum radeon_clk_action
1408{
1409	RADEON_SCLK_UP = 1,
1410	RADEON_SCLK_DOWN
1411};
1412
1413struct radeon_blacklist_clocks
1414{
1415	u32 sclk;
1416	u32 mclk;
1417	enum radeon_clk_action action;
1418};
1419
1420struct radeon_clock_and_voltage_limits {
1421	u32 sclk;
1422	u32 mclk;
1423	u16 vddc;
1424	u16 vddci;
1425};
1426
1427struct radeon_clock_array {
1428	u32 count;
1429	u32 *values;
1430};
1431
1432struct radeon_clock_voltage_dependency_entry {
1433	u32 clk;
1434	u16 v;
1435};
1436
1437struct radeon_clock_voltage_dependency_table {
1438	u32 count;
1439	struct radeon_clock_voltage_dependency_entry *entries;
1440};
1441
1442union radeon_cac_leakage_entry {
1443	struct {
1444		u16 vddc;
1445		u32 leakage;
1446	};
1447	struct {
1448		u16 vddc1;
1449		u16 vddc2;
1450		u16 vddc3;
1451	};
1452};
1453
1454struct radeon_cac_leakage_table {
1455	u32 count;
1456	union radeon_cac_leakage_entry *entries;
1457};
1458
1459struct radeon_phase_shedding_limits_entry {
1460	u16 voltage;
1461	u32 sclk;
1462	u32 mclk;
1463};
1464
1465struct radeon_phase_shedding_limits_table {
1466	u32 count;
1467	struct radeon_phase_shedding_limits_entry *entries;
1468};
1469
1470struct radeon_uvd_clock_voltage_dependency_entry {
1471	u32 vclk;
1472	u32 dclk;
1473	u16 v;
1474};
1475
1476struct radeon_uvd_clock_voltage_dependency_table {
1477	u8 count;
1478	struct radeon_uvd_clock_voltage_dependency_entry *entries;
1479};
1480
1481struct radeon_vce_clock_voltage_dependency_entry {
1482	u32 ecclk;
1483	u32 evclk;
1484	u16 v;
1485};
1486
1487struct radeon_vce_clock_voltage_dependency_table {
1488	u8 count;
1489	struct radeon_vce_clock_voltage_dependency_entry *entries;
1490};
1491
1492struct radeon_ppm_table {
1493	u8 ppm_design;
1494	u16 cpu_core_number;
1495	u32 platform_tdp;
1496	u32 small_ac_platform_tdp;
1497	u32 platform_tdc;
1498	u32 small_ac_platform_tdc;
1499	u32 apu_tdp;
1500	u32 dgpu_tdp;
1501	u32 dgpu_ulv_power;
1502	u32 tj_max;
1503};
1504
1505struct radeon_cac_tdp_table {
1506	u16 tdp;
1507	u16 configurable_tdp;
1508	u16 tdc;
1509	u16 battery_power_limit;
1510	u16 small_power_limit;
1511	u16 low_cac_leakage;
1512	u16 high_cac_leakage;
1513	u16 maximum_power_delivery_limit;
1514};
1515
1516struct radeon_dpm_dynamic_state {
1517	struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
1518	struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
1519	struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
1520	struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1521	struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1522	struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1523	struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1524	struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1525	struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1526	struct radeon_clock_array valid_sclk_values;
1527	struct radeon_clock_array valid_mclk_values;
1528	struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
1529	struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
1530	u32 mclk_sclk_ratio;
1531	u32 sclk_mclk_delta;
1532	u16 vddc_vddci_delta;
1533	u16 min_vddc_for_pcie_gen2;
1534	struct radeon_cac_leakage_table cac_leakage_table;
1535	struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
1536	struct radeon_ppm_table *ppm_table;
1537	struct radeon_cac_tdp_table *cac_tdp_table;
1538};
1539
1540struct radeon_dpm_fan {
1541	u16 t_min;
1542	u16 t_med;
1543	u16 t_high;
1544	u16 pwm_min;
1545	u16 pwm_med;
1546	u16 pwm_high;
1547	u8 t_hyst;
1548	u32 cycle_delay;
1549	u16 t_max;
1550	u8 control_mode;
1551	u16 default_max_fan_pwm;
1552	u16 default_fan_output_sensitivity;
1553	u16 fan_output_sensitivity;
1554	bool ucode_fan_control;
1555};
1556
1557enum radeon_pcie_gen {
1558	RADEON_PCIE_GEN1 = 0,
1559	RADEON_PCIE_GEN2 = 1,
1560	RADEON_PCIE_GEN3 = 2,
1561	RADEON_PCIE_GEN_INVALID = 0xffff
1562};
1563
1564enum radeon_dpm_forced_level {
1565	RADEON_DPM_FORCED_LEVEL_AUTO = 0,
1566	RADEON_DPM_FORCED_LEVEL_LOW = 1,
1567	RADEON_DPM_FORCED_LEVEL_HIGH = 2,
1568};
1569
1570struct radeon_vce_state {
1571	/* vce clocks */
1572	u32 evclk;
1573	u32 ecclk;
1574	/* gpu clocks */
1575	u32 sclk;
1576	u32 mclk;
1577	u8 clk_idx;
1578	u8 pstate;
1579};
1580
1581struct radeon_dpm {
1582	struct radeon_ps        *ps;
1583	/* number of valid power states */
1584	int                     num_ps;
1585	/* current power state that is active */
1586	struct radeon_ps        *current_ps;
1587	/* requested power state */
1588	struct radeon_ps        *requested_ps;
1589	/* boot up power state */
1590	struct radeon_ps        *boot_ps;
1591	/* default uvd power state */
1592	struct radeon_ps        *uvd_ps;
1593	/* vce requirements */
1594	struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
1595	enum radeon_vce_level vce_level;
1596	enum radeon_pm_state_type state;
1597	enum radeon_pm_state_type user_state;
1598	u32                     platform_caps;
1599	u32                     voltage_response_time;
1600	u32                     backbias_response_time;
1601	void                    *priv;
1602	u32			new_active_crtcs;
1603	int			new_active_crtc_count;
1604	u32			current_active_crtcs;
1605	int			current_active_crtc_count;
1606	bool single_display;
1607	struct radeon_dpm_dynamic_state dyn_state;
1608	struct radeon_dpm_fan fan;
1609	u32 tdp_limit;
1610	u32 near_tdp_limit;
1611	u32 near_tdp_limit_adjusted;
1612	u32 sq_ramping_threshold;
1613	u32 cac_leakage;
1614	u16 tdp_od_limit;
1615	u32 tdp_adjustment;
1616	u16 load_line_slope;
1617	bool power_control;
1618	bool ac_power;
1619	/* special states active */
1620	bool                    thermal_active;
1621	bool                    uvd_active;
1622	bool                    vce_active;
1623	/* thermal handling */
1624	struct radeon_dpm_thermal thermal;
1625	/* forced levels */
1626	enum radeon_dpm_forced_level forced_level;
1627	/* track UVD streams */
1628	unsigned sd;
1629	unsigned hd;
1630};
1631
1632void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
1633void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
1634
1635struct radeon_pm {
1636	struct mutex		mutex;
1637	/* write locked while reprogramming mclk */
1638	struct rw_semaphore	mclk_lock;
1639	u32			active_crtcs;
1640	int			active_crtc_count;
1641	int			req_vblank;
1642	bool			vblank_sync;
1643	fixed20_12		max_bandwidth;
1644	fixed20_12		igp_sideport_mclk;
1645	fixed20_12		igp_system_mclk;
1646	fixed20_12		igp_ht_link_clk;
1647	fixed20_12		igp_ht_link_width;
1648	fixed20_12		k8_bandwidth;
1649	fixed20_12		sideport_bandwidth;
1650	fixed20_12		ht_bandwidth;
1651	fixed20_12		core_bandwidth;
1652	fixed20_12		sclk;
1653	fixed20_12		mclk;
1654	fixed20_12		needed_bandwidth;
1655	struct radeon_power_state *power_state;
1656	/* number of valid power states */
1657	int                     num_power_states;
1658	int                     current_power_state_index;
1659	int                     current_clock_mode_index;
1660	int                     requested_power_state_index;
1661	int                     requested_clock_mode_index;
1662	int                     default_power_state_index;
1663	u32                     current_sclk;
1664	u32                     current_mclk;
1665	u16                     current_vddc;
1666	u16                     current_vddci;
1667	u32                     default_sclk;
1668	u32                     default_mclk;
1669	u16                     default_vddc;
1670	u16                     default_vddci;
1671	struct radeon_i2c_chan *i2c_bus;
1672	/* selected pm method */
1673	enum radeon_pm_method     pm_method;
1674	/* dynpm power management */
1675	struct delayed_work	dynpm_idle_work;
1676	enum radeon_dynpm_state	dynpm_state;
1677	enum radeon_dynpm_action	dynpm_planned_action;
1678	unsigned long		dynpm_action_timeout;
1679	bool                    dynpm_can_upclock;
1680	bool                    dynpm_can_downclock;
1681	/* profile-based power management */
1682	enum radeon_pm_profile_type profile;
1683	int                     profile_index;
1684	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
1685	/* internal thermal controller on rv6xx+ */
1686	enum radeon_int_thermal_type int_thermal_type;
1687	struct device	        *int_hwmon_dev;
1688	/* fan control parameters */
1689	bool                    no_fan;
1690	u8                      fan_pulses_per_revolution;
1691	u8                      fan_min_rpm;
1692	u8                      fan_max_rpm;
1693	/* dpm */
1694	bool                    dpm_enabled;
1695	bool                    sysfs_initialized;
1696	struct radeon_dpm       dpm;
1697};
1698
1699#define RADEON_PCIE_SPEED_25 1
1700#define RADEON_PCIE_SPEED_50 2
1701#define RADEON_PCIE_SPEED_80 4
1702
1703int radeon_pm_get_type_index(struct radeon_device *rdev,
1704			     enum radeon_pm_state_type ps_type,
1705			     int instance);
1706/*
1707 * UVD
1708 */
1709#define RADEON_DEFAULT_UVD_HANDLES	10
1710#define RADEON_MAX_UVD_HANDLES		30
1711#define RADEON_UVD_STACK_SIZE		(200*1024)
1712#define RADEON_UVD_HEAP_SIZE		(256*1024)
1713#define RADEON_UVD_SESSION_SIZE		(50*1024)
1714
1715struct radeon_uvd {
1716	bool			fw_header_present;
1717	struct radeon_bo	*vcpu_bo;
1718	void			*cpu_addr;
1719	uint64_t		gpu_addr;
1720	unsigned		max_handles;
1721	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
1722	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
1723	unsigned		img_size[RADEON_MAX_UVD_HANDLES];
1724	struct delayed_work	idle_work;
1725};
1726
1727int radeon_uvd_init(struct radeon_device *rdev);
1728void radeon_uvd_fini(struct radeon_device *rdev);
1729int radeon_uvd_suspend(struct radeon_device *rdev);
1730int radeon_uvd_resume(struct radeon_device *rdev);
1731int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
1732			      uint32_t handle, struct radeon_fence **fence);
1733int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
1734			       uint32_t handle, struct radeon_fence **fence);
1735void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
1736				       uint32_t allowed_domains);
1737void radeon_uvd_free_handles(struct radeon_device *rdev,
1738			     struct drm_file *filp);
1739int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1740void radeon_uvd_note_usage(struct radeon_device *rdev);
1741int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1742				  unsigned vclk, unsigned dclk,
1743				  unsigned vco_min, unsigned vco_max,
1744				  unsigned fb_factor, unsigned fb_mask,
1745				  unsigned pd_min, unsigned pd_max,
1746				  unsigned pd_even,
1747				  unsigned *optimal_fb_div,
1748				  unsigned *optimal_vclk_div,
1749				  unsigned *optimal_dclk_div);
1750int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1751                                unsigned cg_upll_func_cntl);
1752
1753/*
1754 * VCE
1755 */
1756#define RADEON_MAX_VCE_HANDLES	16
1757
1758struct radeon_vce {
1759	struct radeon_bo	*vcpu_bo;
1760	uint64_t		gpu_addr;
1761	unsigned		fw_version;
1762	unsigned		fb_version;
1763	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
1764	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
1765	unsigned		img_size[RADEON_MAX_VCE_HANDLES];
1766	struct delayed_work	idle_work;
1767	uint32_t		keyselect;
1768};
1769
1770int radeon_vce_init(struct radeon_device *rdev);
1771void radeon_vce_fini(struct radeon_device *rdev);
1772int radeon_vce_suspend(struct radeon_device *rdev);
1773int radeon_vce_resume(struct radeon_device *rdev);
1774int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
1775			      uint32_t handle, struct radeon_fence **fence);
1776int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
1777			       uint32_t handle, struct radeon_fence **fence);
1778void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
1779void radeon_vce_note_usage(struct radeon_device *rdev);
1780int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
1781int radeon_vce_cs_parse(struct radeon_cs_parser *p);
1782bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
1783			       struct radeon_ring *ring,
1784			       struct radeon_semaphore *semaphore,
1785			       bool emit_wait);
1786void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
1787void radeon_vce_fence_emit(struct radeon_device *rdev,
1788			   struct radeon_fence *fence);
1789int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
1790int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
1791
1792struct r600_audio_pin {
1793	int			channels;
1794	int			rate;
1795	int			bits_per_sample;
1796	u8			status_bits;
1797	u8			category_code;
1798	u32			offset;
1799	bool			connected;
1800	u32			id;
1801};
1802
1803struct r600_audio {
1804	bool enabled;
1805	struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
1806	int num_pins;
1807	struct radeon_audio_funcs *hdmi_funcs;
1808	struct radeon_audio_funcs *dp_funcs;
1809	struct radeon_audio_basic_funcs *funcs;
1810};
1811
1812/*
1813 * Benchmarking
1814 */
1815void radeon_benchmark(struct radeon_device *rdev, int test_number);
1816
1817
1818/*
1819 * Testing
1820 */
1821void radeon_test_moves(struct radeon_device *rdev);
1822void radeon_test_ring_sync(struct radeon_device *rdev,
1823			   struct radeon_ring *cpA,
1824			   struct radeon_ring *cpB);
1825void radeon_test_syncing(struct radeon_device *rdev);
1826
1827/*
1828 * MMU Notifier
1829 */
1830#if defined(CONFIG_MMU_NOTIFIER)
1831int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
1832void radeon_mn_unregister(struct radeon_bo *bo);
1833#else
1834static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
1835{
1836	return -ENODEV;
1837}
1838static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
1839#endif
1840
1841/*
1842 * Debugfs
1843 */
1844struct radeon_debugfs {
1845	struct drm_info_list	*files;
1846	unsigned		num_files;
1847};
1848
1849int radeon_debugfs_add_files(struct radeon_device *rdev,
1850			     struct drm_info_list *files,
1851			     unsigned nfiles);
1852int radeon_debugfs_fence_init(struct radeon_device *rdev);
1853
1854/*
1855 * ASIC ring specific functions.
1856 */
1857struct radeon_asic_ring {
1858	/* ring read/write ptr handling */
1859	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1860	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1861	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1862
1863	/* validating and patching of IBs */
1864	int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
1865	int (*cs_parse)(struct radeon_cs_parser *p);
1866
	/* command emit functions */
1868	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1869	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1870	void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
1871	bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1872			       struct radeon_semaphore *semaphore, bool emit_wait);
1873	void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
1874			 unsigned vm_id, uint64_t pd_addr);
1875
1876	/* testing functions */
1877	int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1878	int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1879	bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1880
1881	/* deprecated */
1882	void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1883};
1884
1885/*
1886 * ASIC specific functions.
1887 */
1888struct radeon_asic {
1889	int (*init)(struct radeon_device *rdev);
1890	void (*fini)(struct radeon_device *rdev);
1891	int (*resume)(struct radeon_device *rdev);
1892	int (*suspend)(struct radeon_device *rdev);
1893	void (*vga_set_state)(struct radeon_device *rdev, bool state);
1894	int (*asic_reset)(struct radeon_device *rdev, bool hard);
1895	/* Flush the HDP cache via MMIO */
1896	void (*mmio_hdp_flush)(struct radeon_device *rdev);
1897	/* check if 3D engine is idle */
1898	bool (*gui_idle)(struct radeon_device *rdev);
1899	/* wait for mc_idle */
1900	int (*mc_wait_for_idle)(struct radeon_device *rdev);
1901	/* get the reference clock */
1902	u32 (*get_xclk)(struct radeon_device *rdev);
1903	/* get the gpu clock counter */
1904	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
1905	/* get register for info ioctl */
1906	int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
1907	/* gart */
1908	struct {
1909		void (*tlb_flush)(struct radeon_device *rdev);
1910		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
1911		void (*set_page)(struct radeon_device *rdev, unsigned i,
1912				 uint64_t entry);
1913	} gart;
1914	struct {
1915		int (*init)(struct radeon_device *rdev);
1916		void (*fini)(struct radeon_device *rdev);
1917		void (*copy_pages)(struct radeon_device *rdev,
1918				   struct radeon_ib *ib,
1919				   uint64_t pe, uint64_t src,
1920				   unsigned count);
1921		void (*write_pages)(struct radeon_device *rdev,
1922				    struct radeon_ib *ib,
1923				    uint64_t pe,
1924				    uint64_t addr, unsigned count,
1925				    uint32_t incr, uint32_t flags);
1926		void (*set_pages)(struct radeon_device *rdev,
1927				  struct radeon_ib *ib,
1928				  uint64_t pe,
1929				  uint64_t addr, unsigned count,
1930				  uint32_t incr, uint32_t flags);
1931		void (*pad_ib)(struct radeon_ib *ib);
1932	} vm;
1933	/* ring specific callbacks */
1934	const struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
1935	/* irqs */
1936	struct {
1937		int (*set)(struct radeon_device *rdev);
1938		int (*process)(struct radeon_device *rdev);
1939	} irq;
1940	/* displays */
1941	struct {
1942		/* display watermarks */
1943		void (*bandwidth_update)(struct radeon_device *rdev);
1944		/* get frame count */
1945		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1946		/* wait for vblank */
1947		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1948		/* set backlight level */
1949		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1950		/* get backlight level */
1951		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1952		/* audio callbacks */
1953		void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
1954		void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
1955	} display;
1956	/* copy functions for bo handling */
1957	struct {
1958		struct radeon_fence *(*blit)(struct radeon_device *rdev,
1959					     uint64_t src_offset,
1960					     uint64_t dst_offset,
1961					     unsigned num_gpu_pages,
1962					     struct dma_resv *resv);
1963		u32 blit_ring_index;
1964		struct radeon_fence *(*dma)(struct radeon_device *rdev,
1965					    uint64_t src_offset,
1966					    uint64_t dst_offset,
1967					    unsigned num_gpu_pages,
1968					    struct dma_resv *resv);
1969		u32 dma_ring_index;
1970		/* method used for bo copy */
1971		struct radeon_fence *(*copy)(struct radeon_device *rdev,
1972					     uint64_t src_offset,
1973					     uint64_t dst_offset,
1974					     unsigned num_gpu_pages,
1975					     struct dma_resv *resv);
1976		/* ring used for bo copies */
1977		u32 copy_ring_index;
1978	} copy;
1979	/* surfaces */
1980	struct {
1981		int (*set_reg)(struct radeon_device *rdev, int reg,
1982				       uint32_t tiling_flags, uint32_t pitch,
1983				       uint32_t offset, uint32_t obj_size);
1984		void (*clear_reg)(struct radeon_device *rdev, int reg);
1985	} surface;
1986	/* hotplug detect */
1987	struct {
1988		void (*init)(struct radeon_device *rdev);
1989		void (*fini)(struct radeon_device *rdev);
1990		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1991		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1992	} hpd;
1993	/* static power management */
1994	struct {
1995		void (*misc)(struct radeon_device *rdev);
1996		void (*prepare)(struct radeon_device *rdev);
1997		void (*finish)(struct radeon_device *rdev);
1998		void (*init_profile)(struct radeon_device *rdev);
1999		void (*get_dynpm_state)(struct radeon_device *rdev);
2000		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
2001		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
2002		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
2003		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
2004		int (*get_pcie_lanes)(struct radeon_device *rdev);
2005		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
2006		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
2007		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
2008		int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
2009		int (*get_temperature)(struct radeon_device *rdev);
2010	} pm;
2011	/* dynamic power management */
2012	struct {
2013		int (*init)(struct radeon_device *rdev);
2014		void (*setup_asic)(struct radeon_device *rdev);
2015		int (*enable)(struct radeon_device *rdev);
2016		int (*late_enable)(struct radeon_device *rdev);
2017		void (*disable)(struct radeon_device *rdev);
2018		int (*pre_set_power_state)(struct radeon_device *rdev);
2019		int (*set_power_state)(struct radeon_device *rdev);
2020		void (*post_set_power_state)(struct radeon_device *rdev);
2021		void (*display_configuration_changed)(struct radeon_device *rdev);
2022		void (*fini)(struct radeon_device *rdev);
2023		u32 (*get_sclk)(struct radeon_device *rdev, bool low);
2024		u32 (*get_mclk)(struct radeon_device *rdev, bool low);
2025		void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
2026		void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
2027		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
2028		bool (*vblank_too_short)(struct radeon_device *rdev);
2029		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
2030		void (*enable_bapm)(struct radeon_device *rdev, bool enable);
2031		void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
2032		u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
2033		int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
2034		int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
2035		u32 (*get_current_sclk)(struct radeon_device *rdev);
2036		u32 (*get_current_mclk)(struct radeon_device *rdev);
2037	} dpm;
2038	/* pageflipping */
2039	struct {
2040		void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool async);
2041		bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
2042	} pflip;
2043};
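/*
 * Illustrative sketch only: an ASIC family wires these dispatch tables up in
 * radeon_asic.c roughly as below.  The callback names used here are
 * placeholders, not the driver's actual symbols.
 *
 *	static const struct radeon_asic_ring family_gfx_ring = {
 *		.ib_execute = &family_ring_ib_execute,
 *		.emit_fence = &family_fence_ring_emit,
 *		.cs_parse = &family_cs_parse,
 *		.ring_test = &family_ring_test,
 *		.ib_test = &family_ib_test,
 *		.is_lockup = &family_gpu_is_lockup,
 *		.get_rptr = &family_gfx_get_rptr,
 *		.get_wptr = &family_gfx_get_wptr,
 *		.set_wptr = &family_gfx_set_wptr,
 *	};
 *
 *	static struct radeon_asic family_asic = {
 *		.init = &family_init,
 *		.fini = &family_fini,
 *		.ring = {
 *			[RADEON_RING_TYPE_GFX_INDEX] = &family_gfx_ring,
 *		},
 *		...
 *	};
 */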
2044
2045/*
2046 * Asic structures
2047 */
2048struct r100_asic {
2049	const unsigned		*reg_safe_bm;
2050	unsigned		reg_safe_bm_size;
2051	u32			hdp_cntl;
2052};
2053
2054struct r300_asic {
2055	const unsigned		*reg_safe_bm;
2056	unsigned		reg_safe_bm_size;
2057	u32			resync_scratch;
2058	u32			hdp_cntl;
2059};
2060
2061struct r600_asic {
2062	unsigned		max_pipes;
2063	unsigned		max_tile_pipes;
2064	unsigned		max_simds;
2065	unsigned		max_backends;
2066	unsigned		max_gprs;
2067	unsigned		max_threads;
2068	unsigned		max_stack_entries;
2069	unsigned		max_hw_contexts;
2070	unsigned		max_gs_threads;
2071	unsigned		sx_max_export_size;
2072	unsigned		sx_max_export_pos_size;
2073	unsigned		sx_max_export_smx_size;
2074	unsigned		sq_num_cf_insts;
2075	unsigned		tiling_nbanks;
2076	unsigned		tiling_npipes;
2077	unsigned		tiling_group_size;
2078	unsigned		tile_config;
2079	unsigned		backend_map;
2080	unsigned		active_simds;
2081};
2082
2083struct rv770_asic {
2084	unsigned		max_pipes;
2085	unsigned		max_tile_pipes;
2086	unsigned		max_simds;
2087	unsigned		max_backends;
2088	unsigned		max_gprs;
2089	unsigned		max_threads;
2090	unsigned		max_stack_entries;
2091	unsigned		max_hw_contexts;
2092	unsigned		max_gs_threads;
2093	unsigned		sx_max_export_size;
2094	unsigned		sx_max_export_pos_size;
2095	unsigned		sx_max_export_smx_size;
2096	unsigned		sq_num_cf_insts;
2097	unsigned		sx_num_of_sets;
2098	unsigned		sc_prim_fifo_size;
2099	unsigned		sc_hiz_tile_fifo_size;
2100	unsigned		sc_earlyz_tile_fifo_fize;
2101	unsigned		tiling_nbanks;
2102	unsigned		tiling_npipes;
2103	unsigned		tiling_group_size;
2104	unsigned		tile_config;
2105	unsigned		backend_map;
2106	unsigned		active_simds;
2107};
2108
2109struct evergreen_asic {
2110	unsigned num_ses;
2111	unsigned max_pipes;
2112	unsigned max_tile_pipes;
2113	unsigned max_simds;
2114	unsigned max_backends;
2115	unsigned max_gprs;
2116	unsigned max_threads;
2117	unsigned max_stack_entries;
2118	unsigned max_hw_contexts;
2119	unsigned max_gs_threads;
2120	unsigned sx_max_export_size;
2121	unsigned sx_max_export_pos_size;
2122	unsigned sx_max_export_smx_size;
2123	unsigned sq_num_cf_insts;
2124	unsigned sx_num_of_sets;
2125	unsigned sc_prim_fifo_size;
2126	unsigned sc_hiz_tile_fifo_size;
2127	unsigned sc_earlyz_tile_fifo_size;
2128	unsigned tiling_nbanks;
2129	unsigned tiling_npipes;
2130	unsigned tiling_group_size;
2131	unsigned tile_config;
2132	unsigned backend_map;
2133	unsigned active_simds;
2134};
2135
2136struct cayman_asic {
2137	unsigned max_shader_engines;
2138	unsigned max_pipes_per_simd;
2139	unsigned max_tile_pipes;
2140	unsigned max_simds_per_se;
2141	unsigned max_backends_per_se;
2142	unsigned max_texture_channel_caches;
2143	unsigned max_gprs;
2144	unsigned max_threads;
2145	unsigned max_gs_threads;
2146	unsigned max_stack_entries;
2147	unsigned sx_num_of_sets;
2148	unsigned sx_max_export_size;
2149	unsigned sx_max_export_pos_size;
2150	unsigned sx_max_export_smx_size;
2151	unsigned max_hw_contexts;
2152	unsigned sq_num_cf_insts;
2153	unsigned sc_prim_fifo_size;
2154	unsigned sc_hiz_tile_fifo_size;
2155	unsigned sc_earlyz_tile_fifo_size;
2156
2157	unsigned num_shader_engines;
2158	unsigned num_shader_pipes_per_simd;
2159	unsigned num_tile_pipes;
2160	unsigned num_simds_per_se;
2161	unsigned num_backends_per_se;
2162	unsigned backend_disable_mask_per_asic;
2163	unsigned backend_map;
2164	unsigned num_texture_channel_caches;
2165	unsigned mem_max_burst_length_bytes;
2166	unsigned mem_row_size_in_kb;
2167	unsigned shader_engine_tile_size;
2168	unsigned num_gpus;
2169	unsigned multi_gpu_tile_size;
2170
2171	unsigned tile_config;
2172	unsigned active_simds;
2173};
2174
2175struct si_asic {
2176	unsigned max_shader_engines;
2177	unsigned max_tile_pipes;
2178	unsigned max_cu_per_sh;
2179	unsigned max_sh_per_se;
2180	unsigned max_backends_per_se;
2181	unsigned max_texture_channel_caches;
2182	unsigned max_gprs;
2183	unsigned max_gs_threads;
2184	unsigned max_hw_contexts;
2185	unsigned sc_prim_fifo_size_frontend;
2186	unsigned sc_prim_fifo_size_backend;
2187	unsigned sc_hiz_tile_fifo_size;
2188	unsigned sc_earlyz_tile_fifo_size;
2189
2190	unsigned num_tile_pipes;
2191	unsigned backend_enable_mask;
2192	unsigned backend_disable_mask_per_asic;
2193	unsigned backend_map;
2194	unsigned num_texture_channel_caches;
2195	unsigned mem_max_burst_length_bytes;
2196	unsigned mem_row_size_in_kb;
2197	unsigned shader_engine_tile_size;
2198	unsigned num_gpus;
2199	unsigned multi_gpu_tile_size;
2200
2201	unsigned tile_config;
2202	uint32_t tile_mode_array[32];
2203	uint32_t active_cus;
2204};
2205
2206struct cik_asic {
2207	unsigned max_shader_engines;
2208	unsigned max_tile_pipes;
2209	unsigned max_cu_per_sh;
2210	unsigned max_sh_per_se;
2211	unsigned max_backends_per_se;
2212	unsigned max_texture_channel_caches;
2213	unsigned max_gprs;
2214	unsigned max_gs_threads;
2215	unsigned max_hw_contexts;
2216	unsigned sc_prim_fifo_size_frontend;
2217	unsigned sc_prim_fifo_size_backend;
2218	unsigned sc_hiz_tile_fifo_size;
2219	unsigned sc_earlyz_tile_fifo_size;
2220
2221	unsigned num_tile_pipes;
2222	unsigned backend_enable_mask;
2223	unsigned backend_disable_mask_per_asic;
2224	unsigned backend_map;
2225	unsigned num_texture_channel_caches;
2226	unsigned mem_max_burst_length_bytes;
2227	unsigned mem_row_size_in_kb;
2228	unsigned shader_engine_tile_size;
2229	unsigned num_gpus;
2230	unsigned multi_gpu_tile_size;
2231
2232	unsigned tile_config;
2233	uint32_t tile_mode_array[32];
2234	uint32_t macrotile_mode_array[16];
2235	uint32_t active_cus;
2236};
2237
2238union radeon_asic_config {
2239	struct r300_asic	r300;
2240	struct r100_asic	r100;
2241	struct r600_asic	r600;
2242	struct rv770_asic	rv770;
2243	struct evergreen_asic	evergreen;
2244	struct cayman_asic	cayman;
2245	struct si_asic		si;
2246	struct cik_asic		cik;
2247};
2248
2249/*
 * asic initialization from radeon_asic.c
2251 */
2252void radeon_agp_disable(struct radeon_device *rdev);
2253int radeon_asic_init(struct radeon_device *rdev);
2254
2255
2256/*
2257 * IOCTL.
2258 */
2259int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
2260			  struct drm_file *filp);
2261int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
2262			    struct drm_file *filp);
2263int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
2264			     struct drm_file *filp);
2265int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
2266			 struct drm_file *file_priv);
2267int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
2268			   struct drm_file *file_priv);
2269int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2270			    struct drm_file *file_priv);
2271int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
2272			   struct drm_file *file_priv);
2273int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2274				struct drm_file *filp);
2275int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
2276			  struct drm_file *filp);
2277int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
2278			  struct drm_file *filp);
2279int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
2280			      struct drm_file *filp);
2281int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
2282			  struct drm_file *filp);
2283int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
2284			struct drm_file *filp);
2285int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
2286int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
2287				struct drm_file *filp);
2288int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
2289				struct drm_file *filp);
2290
2291/* VRAM scratch page for HDP bug, default vram page */
2292struct r600_vram_scratch {
2293	struct radeon_bo		*robj;
2294	volatile uint32_t		*ptr;
2295	u64				gpu_addr;
2296};
2297
2298/*
2299 * ACPI
2300 */
2301struct radeon_atif_notification_cfg {
2302	bool enabled;
2303	int command_code;
2304};
2305
2306struct radeon_atif_notifications {
2307	bool display_switch;
2308	bool expansion_mode_change;
2309	bool thermal_state;
2310	bool forced_power_state;
2311	bool system_power_state;
2312	bool display_conf_change;
2313	bool px_gfx_switch;
2314	bool brightness_change;
2315	bool dgpu_display_event;
2316};
2317
2318struct radeon_atif_functions {
2319	bool system_params;
2320	bool sbios_requests;
2321	bool select_active_disp;
2322	bool lid_state;
2323	bool get_tv_standard;
2324	bool set_tv_standard;
2325	bool get_panel_expansion_mode;
2326	bool set_panel_expansion_mode;
2327	bool temperature_change;
2328	bool graphics_device_types;
2329};
2330
2331struct radeon_atif {
2332	struct radeon_atif_notifications notifications;
2333	struct radeon_atif_functions functions;
2334	struct radeon_atif_notification_cfg notification_cfg;
2335	struct radeon_encoder *encoder_for_bl;
2336};
2337
2338struct radeon_atcs_functions {
2339	bool get_ext_state;
2340	bool pcie_perf_req;
2341	bool pcie_dev_rdy;
2342	bool pcie_bus_width;
2343};
2344
2345struct radeon_atcs {
2346	struct radeon_atcs_functions functions;
2347};
2348
2349/*
2350 * Core structure, functions and helpers.
2351 */
2352typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
2353typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
2354
2355struct radeon_device {
2356	struct device			*dev;
2357	struct drm_device		*ddev;
2358	struct pci_dev			*pdev;
2359	struct rw_semaphore		exclusive_lock;
2360	/* ASIC */
2361	union radeon_asic_config	config;
2362	enum radeon_family		family;
2363	unsigned long			flags;
2364	int				usec_timeout;
2365	enum radeon_pll_errata		pll_errata;
2366	int				num_gb_pipes;
2367	int				num_z_pipes;
2368	int				disp_priority;
2369	/* BIOS */
2370	uint8_t				*bios;
2371	bool				is_atom_bios;
2372	uint16_t			bios_header_start;
2373	struct radeon_bo		*stolen_vga_memory;
2374	/* Register mmio */
2375#ifndef __NetBSD__
2376	resource_size_t			rmmio_base;
2377	resource_size_t			rmmio_size;
2378#endif
2379	/* protects concurrent MM_INDEX/DATA based register access */
2380	spinlock_t mmio_idx_lock;
2381	/* protects concurrent SMC based register access */
2382	spinlock_t smc_idx_lock;
2383	/* protects concurrent PLL register access */
2384	spinlock_t pll_idx_lock;
2385	/* protects concurrent MC register access */
2386	spinlock_t mc_idx_lock;
2387	/* protects concurrent PCIE register access */
2388	spinlock_t pcie_idx_lock;
2389	/* protects concurrent PCIE_PORT register access */
2390	spinlock_t pciep_idx_lock;
2391	/* protects concurrent PIF register access */
2392	spinlock_t pif_idx_lock;
2393	/* protects concurrent CG register access */
2394	spinlock_t cg_idx_lock;
2395	/* protects concurrent UVD register access */
2396	spinlock_t uvd_idx_lock;
2397	/* protects concurrent RCU register access */
2398	spinlock_t rcu_idx_lock;
2399	/* protects concurrent DIDT register access */
2400	spinlock_t didt_idx_lock;
2401	/* protects concurrent ENDPOINT (audio) register access */
2402	spinlock_t end_idx_lock;
2403#ifdef __NetBSD__
2404	bus_space_tag_t			rmmio_bst;
2405	bus_space_handle_t		rmmio_bsh;
2406	bus_addr_t			rmmio_addr;
2407	bus_size_t			rmmio_size;
2408#else
2409	void __iomem			*rmmio;
2410#endif
2411	radeon_rreg_t			mc_rreg;
2412	radeon_wreg_t			mc_wreg;
2413	radeon_rreg_t			pll_rreg;
2414	radeon_wreg_t			pll_wreg;
2415	uint32_t                        pcie_reg_mask;
2416	radeon_rreg_t			pciep_rreg;
2417	radeon_wreg_t			pciep_wreg;
2418	/* io port */
2419#ifdef __NetBSD__
2420	bus_space_tag_t			rio_mem_bst;
2421	bus_space_handle_t		rio_mem_bsh;
2422	bus_size_t			rio_mem_size;
2423#else
2424	void __iomem                    *rio_mem;
2425	resource_size_t			rio_mem_size;
2426#endif
2427	struct radeon_clock             clock;
2428	struct radeon_mc		mc;
2429	struct radeon_gart		gart;
2430	struct radeon_mode_info		mode_info;
2431	struct radeon_scratch		scratch;
2432	struct radeon_doorbell		doorbell;
2433	struct radeon_mman		mman;
2434	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
2435	spinlock_t			fence_lock;
2436	drm_waitqueue_t			fence_queue;
2437	TAILQ_HEAD(, radeon_fence)	fence_check;
2438	u64				fence_context;
2439	struct mutex			ring_lock;
2440	struct radeon_ring		ring[RADEON_NUM_RINGS];
2441	bool				ib_pool_ready;
2442	struct radeon_sa_manager	ring_tmp_bo;
2443	struct radeon_irq		irq;
2444	struct radeon_asic		*asic;
2445	struct radeon_gem		gem;
2446	struct radeon_pm		pm;
2447	struct radeon_uvd		uvd;
2448	struct radeon_vce		vce;
2449	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
2450	struct radeon_wb		wb;
2451	struct radeon_dummy_page	dummy_page;
2452	bool				shutdown;
2453	bool				need_swiotlb;
2454	bool				accel_working;
	bool				fastfb_working; /* IGP feature */
2456	bool				needs_reset, in_reset;
2457	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
2458	const struct firmware *me_fw;	/* all family ME firmware */
2459	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
2460	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
2461	const struct firmware *mc_fw;	/* NI MC firmware */
2462	const struct firmware *ce_fw;	/* SI CE firmware */
2463	const struct firmware *mec_fw;	/* CIK MEC firmware */
2464	const struct firmware *mec2_fw;	/* KV MEC2 firmware */
2465	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
2466	const struct firmware *smc_fw;	/* SMC firmware */
2467	const struct firmware *uvd_fw;	/* UVD firmware */
2468	const struct firmware *vce_fw;	/* VCE firmware */
2469	bool new_fw;
2470	struct r600_vram_scratch vram_scratch;
2471	int msi_enabled; /* msi enabled */
2472	struct r600_ih ih; /* r6/700 interrupt ring */
2473	struct radeon_rlc rlc;
2474	struct radeon_mec mec;
2475	struct delayed_work hotplug_work;
2476	struct work_struct dp_work;
2477	struct work_struct audio_work;
2478	int num_crtc; /* number of crtcs */
2479	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
2480	bool has_uvd;
2481	bool has_vce;
2482	struct r600_audio audio; /* audio stuff */
2483	struct notifier_block acpi_nb;
	/* only one userspace client can use HyperZ features or CMASK at a time */
2485	struct drm_file *hyperz_filp;
2486	struct drm_file *cmask_filp;
2487	/* i2c buses */
2488	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
2489	/* debugfs */
2490	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
2491	unsigned 		debugfs_count;
2492	/* virtual memory */
2493	struct radeon_vm_manager	vm_manager;
2494	struct mutex			gpu_clock_mutex;
2495	/* memory stats */
2496	atomic64_t			vram_usage;
2497	atomic64_t			gtt_usage;
2498	atomic64_t			num_bytes_moved;
2499	atomic_t			gpu_reset_counter;
2500	/* ACPI interface */
2501	struct radeon_atif		atif;
2502	struct radeon_atcs		atcs;
2503	/* srbm instance registers */
2504	struct mutex			srbm_mutex;
2505	/* clock, powergating flags */
2506	u32 cg_flags;
2507	u32 pg_flags;
2508
2509	struct dev_pm_domain vga_pm_domain;
2510	bool have_disp_power_ref;
2511	u32 px_quirk_flags;
2512
2513	/* tracking pinned memory */
2514	u64 vram_pin_size;
2515	u64 gart_pin_size;
2516};
2517
2518bool radeon_is_px(struct drm_device *dev);
2519int radeon_device_init(struct radeon_device *rdev,
2520		       struct drm_device *ddev,
2521		       struct pci_dev *pdev,
2522		       uint32_t flags);
2523void radeon_device_fini(struct radeon_device *rdev);
2524int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
2525
2526#define RADEON_MIN_MMIO_SIZE 0x10000
2527
2528uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
2529void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2530static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
2531				    bool always_indirect)
2532{
	/* The MMIO space is at least 64 KiB, so for constant register
	 * offsets below that the compiler can optimize the check away. */
2534	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2535#ifdef __NetBSD__
2536		return bus_space_read_4(rdev->rmmio_bst, rdev->rmmio_bsh, reg);
2537#else
2538		return readl(((void __iomem *)rdev->rmmio) + reg);
2539#endif
2540	else
2541		return r100_mm_rreg_slow(rdev, reg);
2542}
2543static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2544				bool always_indirect)
2545{
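	/* Same fast/slow split as r100_mm_rreg above: write directly through
	 * the mapped MMIO window when the offset allows it, otherwise fall
	 * back to the indirect slow path. */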
2546	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2547#ifdef __NetBSD__
2548		bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh, reg, v);
2549#else
2550		writel(v, ((void __iomem *)rdev->rmmio) + reg);
2551#endif
2552	else
2553		r100_mm_wreg_slow(rdev, reg, v);
2554}
2555
2556u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
2557void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2558
2559u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
2560void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
2561
2562/*
2563 * Cast helper
2564 */
2565extern const struct dma_fence_ops radeon_fence_ops;
2566
2567static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
2568{
2569	struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
2570
2571	if (__f->base.ops == &radeon_fence_ops)
2572		return __f;
2573
2574	return NULL;
2575}
2576
2577/*
2578 * Registers read & write functions.
2579 */
2580#ifdef __NetBSD__
2581#define	RREG8(r) bus_space_read_1(rdev->rmmio_bst, rdev->rmmio_bsh, (r))
2582#define	WREG8(r, v) bus_space_write_1(rdev->rmmio_bst, rdev->rmmio_bsh, (r), (v))
2583#define	RREG16(r) bus_space_read_2(rdev->rmmio_bst, rdev->rmmio_bsh, (r))
2584#define	WREG16(r, v) bus_space_write_2(rdev->rmmio_bst, rdev->rmmio_bsh, (r), (v))
2585#else
2586#define RREG8(reg) readb((rdev->rmmio) + (reg))
2587#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
2588#define RREG16(reg) readw((rdev->rmmio) + (reg))
2589#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
2590#endif
2591#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
2592#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
2593#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",	\
2594			    r100_mm_rreg(rdev, (reg), false))
2595#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
2596#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
2597#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2598#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2599#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
2600#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
2601#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
2602#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
2603#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
2604#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
2605#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
2606#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
2607#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
2608#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
2609#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
2610#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
2611#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
2612#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
2613#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
2614#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
2615#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
2616#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
2617#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
2618#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
2619#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
2620#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
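/*
 * Read-modify-write helpers.  Note the mask convention: bits set in 'mask'
 * are preserved from the current register value, while bits clear in 'mask'
 * are taken from 'val'.  Hence WREG32_AND(reg, and) keeps only the bits in
 * 'and', and WREG32_OR(reg, or) sets the bits in 'or' and preserves the rest.
 */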
2621#define WREG32_P(reg, val, mask)				\
2622	do {							\
2623		uint32_t tmp_ = RREG32(reg);			\
2624		tmp_ &= (mask);					\
2625		tmp_ |= ((val) & ~(mask));			\
2626		WREG32(reg, tmp_);				\
2627	} while (0)
2628#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2629#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2630#define WREG32_PLL_P(reg, val, mask)				\
2631	do {							\
2632		uint32_t tmp_ = RREG32_PLL(reg);		\
2633		tmp_ &= (mask);					\
2634		tmp_ |= ((val) & ~(mask));			\
2635		WREG32_PLL(reg, tmp_);				\
2636	} while (0)
2637#define WREG32_SMC_P(reg, val, mask)				\
2638	do {							\
2639		uint32_t tmp_ = RREG32_SMC(reg);		\
2640		tmp_ &= (mask);					\
2641		tmp_ |= ((val) & ~(mask));			\
2642		WREG32_SMC(reg, tmp_);				\
2643	} while (0)
2644#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
2645#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
2646#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
2647
2648#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
2649#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
2650
/*
 * Indirect register accessors.
 * They used to be inlined, but that increased code size by ~65 kbytes.
 * Since each one performs a pair of MMIO ops
 * within a spin_lock_irqsave/spin_unlock_irqrestore region,
 * the cost of call+ret is almost negligible: MMIO and locking
 * cost several dozen cycles each at best, while call+ret is ~5 cycles.
 */
2659uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
2660void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2661u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
2662void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2663u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
2664void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2665u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
2666void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2667u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
2668void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2669u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
2670void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2671u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
2672void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2673u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
2674void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
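/*
 * Illustrative sketch of the pattern described above, not a verbatim copy of
 * the driver's code; the index/data register names are placeholders:
 *
 *	uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
 *	{
 *		unsigned long flags;
 *		uint32_t r;
 *
 *		spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
 *		WREG32(PCIE_INDEX, reg & rdev->pcie_reg_mask);
 *		r = RREG32(PCIE_DATA);
 *		spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 *		return r;
 *	}
 */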
2675
2676void r100_pll_errata_after_index(struct radeon_device *rdev);
2677
2678
2679/*
2680 * ASICs helpers.
2681 */
2682#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
2683			    (rdev->pdev->device == 0x5969))
2684#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
2685		(rdev->family == CHIP_RV200) || \
2686		(rdev->family == CHIP_RS100) || \
2687		(rdev->family == CHIP_RS200) || \
2688		(rdev->family == CHIP_RV250) || \
2689		(rdev->family == CHIP_RV280) || \
2690		(rdev->family == CHIP_RS300))
2691#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
2692		(rdev->family == CHIP_RV350) ||			\
2693		(rdev->family == CHIP_R350)  ||			\
2694		(rdev->family == CHIP_RV380) ||			\
2695		(rdev->family == CHIP_R420)  ||			\
2696		(rdev->family == CHIP_R423)  ||			\
2697		(rdev->family == CHIP_RV410) ||			\
2698		(rdev->family == CHIP_RS400) ||			\
2699		(rdev->family == CHIP_RS480))
2700#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
2701		(rdev->ddev->pdev->device == 0x9443) || \
2702		(rdev->ddev->pdev->device == 0x944B) || \
2703		(rdev->ddev->pdev->device == 0x9506) || \
2704		(rdev->ddev->pdev->device == 0x9509) || \
2705		(rdev->ddev->pdev->device == 0x950F) || \
2706		(rdev->ddev->pdev->device == 0x689C) || \
2707		(rdev->ddev->pdev->device == 0x689D))
2708#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
2709#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
2710			    (rdev->family == CHIP_RS690)  ||	\
2711			    (rdev->family == CHIP_RS740)  ||	\
2712			    (rdev->family >= CHIP_R600))
2713#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
2714#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
2715#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
2716#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
2717			     (rdev->flags & RADEON_IS_IGP))
2718#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
2719#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
2720#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
2721			     (rdev->flags & RADEON_IS_IGP))
2722#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
2723#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
2724#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
2725#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
2726#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
2727#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
2728			     (rdev->family == CHIP_MULLINS))
2729
2730#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
2731			      (rdev->ddev->pdev->device == 0x6850) || \
2732			      (rdev->ddev->pdev->device == 0x6858) || \
2733			      (rdev->ddev->pdev->device == 0x6859) || \
2734			      (rdev->ddev->pdev->device == 0x6840) || \
2735			      (rdev->ddev->pdev->device == 0x6841) || \
2736			      (rdev->ddev->pdev->device == 0x6842) || \
2737			      (rdev->ddev->pdev->device == 0x6843))
2738
2739/*
2740 * BIOS helpers.
2741 */
2742#define RBIOS8(i) (rdev->bios[i])
2743#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
2744#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
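/*
 * The RBIOS helpers read little-endian values out of the copied BIOS image
 * without any alignment assumptions, e.g. (offset purely illustrative):
 *
 *	uint16_t tbl = RBIOS16(0x48);
 */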
2745
2746int radeon_combios_init(struct radeon_device *rdev);
2747void radeon_combios_fini(struct radeon_device *rdev);
2748int radeon_atombios_init(struct radeon_device *rdev);
2749void radeon_atombios_fini(struct radeon_device *rdev);
2750
2751
2752/*
2753 * RING helpers.
2754 */
2755
2756/**
2757 * radeon_ring_write - write a value to the ring
2758 *
2759 * @ring: radeon_ring structure holding ring information
2760 * @v: dword (dw) value to write
2761 *
2762 * Write a value to the requested ring buffer (all asics).
2763 */
2764static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
2765{
2766	if (ring->count_dw <= 0)
2767		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
2768
2769	ring->ring[ring->wptr++] = v;
2770	ring->wptr &= ring->ptr_mask;
2771	ring->count_dw--;
2772	ring->ring_free_dw--;
2773}
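/*
 * Typical (illustrative) usage: reserve space on the ring, emit dwords, then
 * commit.  radeon_ring_lock()/radeon_ring_unlock_commit() are declared with
 * the other ring helpers elsewhere in this header; PKT_HDR and PKT_VAL are
 * placeholders for whatever the caller is emitting.
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PKT_HDR);
 *	radeon_ring_write(ring, PKT_VAL);
 *	radeon_ring_unlock_commit(rdev, ring, false);
 */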
2774
/*
 * ASIC dispatch macros.  Each is a thin wrapper that routes the call through
 * the per-ASIC callback tables in struct radeon_asic.
 */
2778#define radeon_init(rdev) (rdev)->asic->init((rdev))
2779#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
2780#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
2781#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
2782#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
2783#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2784#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev), false)
2785#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
2786#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
2787#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
2788#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2789#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2790#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2791#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2792#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2793#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
2794#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
2795#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
2796#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
2797#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
2798#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
2799#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
2800#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
2801#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
2802#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
2803#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
2804#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
2805#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
2806#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
2807#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
2808#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
2809#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
2810#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
2811#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
2812#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
2813#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
2814#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
2815#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
2816#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
2817#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
2818#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
2819#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
2820#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
2821#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
2822#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
2823#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
2824#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
2825#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
2826#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
2827#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
2828#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
2829#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
2830#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
2831#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
2832#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
2833#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
2834#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
2835#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
2836#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
2837#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
2838#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
2839#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
2840#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
2841#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
2842#define radeon_page_flip(rdev, crtc, base, async) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
2843#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
2844#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
2845#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
2846#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
2847#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
2848#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
2849#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
2850#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
2851#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
2852#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
2853#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
2854#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
2855#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
2856#define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
2857#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
2858#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
2859#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
2860#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
2861#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
2862#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
2863#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
2864#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
2865#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
2866#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
2867#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
2868#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
2869
2870/* Common functions */
2871/* AGP */
2872extern int radeon_gpu_reset(struct radeon_device *rdev);
2873extern void radeon_pci_config_reset(struct radeon_device *rdev);
2874extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
2875extern void radeon_agp_disable(struct radeon_device *rdev);
2876extern int radeon_modeset_init(struct radeon_device *rdev);
2877extern void radeon_modeset_fini(struct radeon_device *rdev);
2878extern bool radeon_card_posted(struct radeon_device *rdev);
2879extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
2880extern void radeon_update_display_priority(struct radeon_device *rdev);
2881extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
2882extern void radeon_scratch_init(struct radeon_device *rdev);
2883extern void radeon_wb_fini(struct radeon_device *rdev);
2884extern int radeon_wb_init(struct radeon_device *rdev);
2885extern void radeon_wb_disable(struct radeon_device *rdev);
2886extern void radeon_surface_init(struct radeon_device *rdev);
2887extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
2888extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
2889extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
2890extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
2891extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
2892extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2893				     uint32_t flags);
2894extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
2895extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
2896extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
2897extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
2898extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2899extern int radeon_suspend_kms(struct drm_device *dev, bool suspend,
2900			      bool fbcon, bool freeze);
2901extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
2902extern void radeon_program_register_sequence(struct radeon_device *rdev,
2903					     const u32 *registers,
2904					     const u32 array_size);
2905
2906/*
2907 * vm
2908 */
2909int radeon_vm_manager_init(struct radeon_device *rdev);
2910void radeon_vm_manager_fini(struct radeon_device *rdev);
2911int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
2912void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
2913struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
2914					  struct radeon_vm *vm,
					  struct list_head *head);
2916struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
2917				       struct radeon_vm *vm, int ring);
2918void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
2920		     int ring, struct radeon_fence *fence);
2921void radeon_vm_fence(struct radeon_device *rdev,
2922		     struct radeon_vm *vm,
2923		     struct radeon_fence *fence);
2924uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2925int radeon_vm_update_page_directory(struct radeon_device *rdev,
2926				    struct radeon_vm *vm);
2927int radeon_vm_clear_freed(struct radeon_device *rdev,
2928			  struct radeon_vm *vm);
2929int radeon_vm_clear_invalids(struct radeon_device *rdev,
2930			     struct radeon_vm *vm);
2931int radeon_vm_bo_update(struct radeon_device *rdev,
2932			struct radeon_bo_va *bo_va,
2933			struct ttm_mem_reg *mem);
2934void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2935			     struct radeon_bo *bo);
2936struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
2937				       struct radeon_bo *bo);
2938struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
2939				      struct radeon_vm *vm,
2940				      struct radeon_bo *bo);
2941int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2942			  struct radeon_bo_va *bo_va,
2943			  uint64_t offset,
2944			  uint32_t flags);
2945void radeon_vm_bo_rmv(struct radeon_device *rdev,
2946		      struct radeon_bo_va *bo_va);
2947
2948/* audio */
2949void r600_audio_update_hdmi(struct work_struct *work);
2950struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
2951struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
2952void r600_audio_enable(struct radeon_device *rdev,
2953		       struct r600_audio_pin *pin,
2954		       u8 enable_mask);
2955void dce6_audio_enable(struct radeon_device *rdev,
2956		       struct r600_audio_pin *pin,
2957		       u8 enable_mask);
2958
2959/*
2960 * R600 vram scratch functions
2961 */
2962int r600_vram_scratch_init(struct radeon_device *rdev);
2963void r600_vram_scratch_fini(struct radeon_device *rdev);
2964
2965/*
2966 * r600 cs checking helper
2967 */
2968unsigned r600_mip_minify(unsigned size, unsigned level);
2969bool r600_fmt_is_valid_color(u32 format);
2970bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
2971int r600_fmt_get_blocksize(u32 format);
2972int r600_fmt_get_nblocksx(u32 format, u32 w);
2973int r600_fmt_get_nblocksy(u32 format, u32 h);
2974
2975/*
2976 * r600 functions used by radeon_encoder.c
2977 */
2978struct radeon_hdmi_acr {
2979	u32 clock;
2980
2981	int n_32khz;
2982	int cts_32khz;
2983
2984	int n_44_1khz;
2985	int cts_44_1khz;
2986
2987	int n_48khz;
2988	int cts_48khz;
2989
2990};
2991
2992extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
2993
2994extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
2995				     u32 tiling_pipe_num,
2996				     u32 max_rb_num,
2997				     u32 total_max_rb_num,
2998				     u32 enabled_rb_mask);
2999
3000/*
3001 * evergreen functions used by radeon_encoder.c
3002 */
3003
3004extern int ni_init_microcode(struct radeon_device *rdev);
3005extern int ni_mc_load_microcode(struct radeon_device *rdev);
3006
3007/* radeon_acpi.c */
3008#if defined(CONFIG_ACPI)
3009extern int radeon_acpi_init(struct radeon_device *rdev);
3010extern void radeon_acpi_fini(struct radeon_device *rdev);
3011extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
3012extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
3013						u8 perf_req, bool advertise);
3014extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
3015#else
3016static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
3017static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
3018#endif
3019
3020int radeon_cs_packet_parse(struct radeon_cs_parser *p,
3021			   struct radeon_cs_packet *pkt,
3022			   unsigned idx);
3023bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
3024void radeon_cs_dump_packet(struct radeon_cs_parser *p,
3025			   struct radeon_cs_packet *pkt);
3026int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
3027				struct radeon_bo_list **cs_reloc,
3028				int nomm);
3029int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
3030			       uint32_t *vline_start_end,
3031			       uint32_t *vline_status);
3032
3033/* interrupt control register helpers */
3034void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
3035				      u32 reg, u32 mask,
3036				      bool enable, const char *name,
3037				      unsigned n);
3038
3039#include "radeon_object.h"
3040
3041#endif
3042