1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 *          Alex Deucher
26 *          Jerome Glisse
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#ifndef __RADEON_H__
33#define __RADEON_H__
34
/* TODO: Here are things that need to be done:
 *	- surface allocator & initializer : (a bit like scratch reg) should
 *	  initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *	  related to surfaces
 *	- WB : write back stuff (do it a bit like the scratch reg things)
 *	- Vblank : look at Jesse's rework and what we should do
 *	- r600/r700: gart & cp
 *	- cs : clean the cs ioctl to use a bitmap & things like that.
 *	- power management stuff
 *	- Barrier in gart code
 *	- Unmappable vram ?
 *	- TESTING, TESTING, TESTING
 */
48
/* Initialization path:
 *  We expect that acceleration initialization might fail for various
 *  reasons even though we work hard to make it work on most
 *  configurations. In order to still have a working userspace in such
 *  a situation the init path must succeed up to the memory controller
 *  initialization point. Failures before this point are considered
 *  fatal errors. Here is the init callchain :
 *      radeon_device_init  perform common structure, mutex initialization
 *      asic_init           setup the GPU memory layout and perform all
 *                          one time initialization (failures in this
 *                          function are considered fatal)
 *      asic_startup        setup the GPU acceleration, in order to
 *                          follow the guideline the first thing this
 *                          function should do is set up the GPU
 *                          memory controller (only MC setup failures
 *                          are considered fatal)
 */
66
67#include <sys/cdefs.h>
68__FBSDID("$FreeBSD$");
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/linker.h>
73#include <sys/firmware.h>
74
75#if defined(CONFIG_ACPI)
76#include <contrib/dev/acpica/include/acpi.h>
77#include <dev/acpica/acpivar.h>
78#endif
79
80#include <dev/drm2/ttm/ttm_bo_api.h>
81#include <dev/drm2/ttm/ttm_bo_driver.h>
82#include <dev/drm2/ttm/ttm_placement.h>
83#include <dev/drm2/ttm/ttm_module.h>
84#include <dev/drm2/ttm/ttm_execbuf_util.h>
85
86#include "radeon_family.h"
87#include "radeon_mode.h"
88#include "radeon_reg.h"
89
90/*
91 * Modules parameters.
92 */
93extern int radeon_no_wb;
94extern int radeon_modeset;
95extern int radeon_dynclks;
96extern int radeon_r4xx_atom;
97extern int radeon_agpmode;
98extern int radeon_vram_limit;
99extern int radeon_gart_size;
100extern int radeon_benchmarking;
101extern int radeon_testing;
102extern int radeon_connector_table;
103extern int radeon_tv;
104extern int radeon_audio;
105extern int radeon_disp_priority;
106extern int radeon_hw_i2c;
107extern int radeon_pcie_gen2;
108extern int radeon_msi;
109extern int radeon_lockup_timeout;
110
/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
115#define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
116#define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
117/* RADEON_IB_POOL_SIZE must be a power of 2 */
118#define RADEON_IB_POOL_SIZE			16
119#define RADEON_DEBUGFS_MAX_COMPONENTS		32
120#define RADEONFB_CONN_LIMIT			4
121#define RADEON_BIOS_NUM_SCRATCH			8
122
123/* max number of rings */
124#define RADEON_NUM_RINGS			5
125
126/* fence seq are set to this number when signaled */
127#define RADEON_FENCE_SIGNALED_SEQ		0LL
128
129/* internal ring indices */
130/* r1xx+ has gfx CP ring */
131#define RADEON_RING_TYPE_GFX_INDEX		0
132
133/* cayman has 2 compute CP rings */
134#define CAYMAN_RING_TYPE_CP1_INDEX		1
135#define CAYMAN_RING_TYPE_CP2_INDEX		2
136
137/* R600+ has an async dma ring */
138#define R600_RING_TYPE_DMA_INDEX		3
139/* cayman add a second async dma ring */
140#define CAYMAN_RING_TYPE_DMA1_INDEX		4
141
142/* hardcode those limit for now */
143#define RADEON_VA_IB_OFFSET			(1 << 20)
144#define RADEON_VA_RESERVED_SIZE			(8 << 20)
145#define RADEON_IB_VM_MAX_SIZE			(64 << 10)
146
147/* reset flags */
148#define RADEON_RESET_GFX			(1 << 0)
149#define RADEON_RESET_COMPUTE			(1 << 1)
150#define RADEON_RESET_DMA			(1 << 2)
151
152/*
153 * Errata workarounds.
154 */
/* PLL errata workaround flags; OR-able bitmask values. */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG             = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
	CHIP_ERRATA_PLL_DELAY           = 0x00000004
};
160
161
162struct radeon_device;
163
164
165/*
166 * BIOS.
167 */
168bool radeon_get_bios(struct radeon_device *rdev);
169
170/*
171 * Dummy page
172 */
/* A single DMA-able dummy page (see radeon_dummy_page_init/fini below). */
struct radeon_dummy_page {
	drm_dma_handle_t *dmah;	/* DMA allocation handle backing the page */
	dma_addr_t	addr;	/* bus address of the page */
};
177int radeon_dummy_page_init(struct radeon_device *rdev);
178void radeon_dummy_page_fini(struct radeon_device *rdev);
179
180
181/*
182 * Clocks
183 */
/* Collection of the chip's PLLs and default clock values. */
struct radeon_clock {
	struct radeon_pll p1pll;	/* primary pixel PLL -- TODO confirm */
	struct radeon_pll p2pll;	/* secondary pixel PLL -- TODO confirm */
	struct radeon_pll dcpll;	/* display clock PLL -- TODO confirm */
	struct radeon_pll spll;		/* engine (sclk) PLL -- TODO confirm */
	struct radeon_pll mpll;		/* memory (mclk) PLL -- TODO confirm */
	/* 10 Khz units */
	uint32_t default_mclk;		/* default memory clock */
	uint32_t default_sclk;		/* default engine clock */
	uint32_t default_dispclk;	/* default display clock */
	uint32_t dp_extclk;		/* external clock for DP */
	uint32_t max_pixel_clock;	/* maximum supported pixel clock */
};
197
198/*
199 * Power management
200 */
201int radeon_pm_init(struct radeon_device *rdev);
202void radeon_pm_fini(struct radeon_device *rdev);
203void radeon_pm_compute_clocks(struct radeon_device *rdev);
204void radeon_pm_suspend(struct radeon_device *rdev);
205void radeon_pm_resume(struct radeon_device *rdev);
206void radeon_combios_get_power_modes(struct radeon_device *rdev);
207void radeon_atombios_get_power_modes(struct radeon_device *rdev);
208void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
209void rs690_pm_info(struct radeon_device *rdev);
210extern int rv6xx_get_temp(struct radeon_device *rdev);
211extern int rv770_get_temp(struct radeon_device *rdev);
212extern int evergreen_get_temp(struct radeon_device *rdev);
213extern int sumo_get_temp(struct radeon_device *rdev);
214extern int si_get_temp(struct radeon_device *rdev);
215extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
216				    unsigned *bankh, unsigned *mtaspect,
217				    unsigned *tile_split);
218
219/*
220 * Fences.
221 */
/* Per-ring fence bookkeeping: where fence values live and what was emitted. */
struct radeon_fence_driver {
	uint32_t			scratch_reg;	/* scratch register used for the fence value */
	uint64_t			gpu_addr;	/* GPU address of the fence value */
	volatile uint32_t		*cpu_addr;	/* CPU view of the fence value */
	/* sync_seq is protected by ring emission lock */
	uint64_t			sync_seq[RADEON_NUM_RINGS];
	atomic64_t			last_seq;	/* last sequence number seen as signaled */
	unsigned long			last_activity;	/* timestamp of last observed progress */
	bool				initialized;
};
232
/* Refcounted fence object identifying a point in a ring's command stream. */
struct radeon_fence {
	struct radeon_device		*rdev;
	unsigned int			kref;	/* reference count */
	/* protected by radeon_fence.lock */
	uint64_t			seq;	/* sequence number on the ring */
	/* RB, DMA, etc. */
	unsigned			ring;	/* index of the ring this fence belongs to */
};
241
242int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
243int radeon_fence_driver_init(struct radeon_device *rdev);
244void radeon_fence_driver_fini(struct radeon_device *rdev);
245void radeon_fence_driver_force_completion(struct radeon_device *rdev);
246int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
247void radeon_fence_process(struct radeon_device *rdev, int ring);
248bool radeon_fence_signaled(struct radeon_fence *fence);
249int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
250int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
251int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
252int radeon_fence_wait_any(struct radeon_device *rdev,
253			  struct radeon_fence **fences,
254			  bool intr);
255struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
256void radeon_fence_unref(struct radeon_fence **fence);
257unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
258bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
259void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
260static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
261						      struct radeon_fence *b)
262{
263	if (!a) {
264		return b;
265	}
266
267	if (!b) {
268		return a;
269	}
270
271	KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belongs to different rings"));
272
273	if (a->seq > b->seq) {
274		return a;
275	} else {
276		return b;
277	}
278}
279
280static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
281					   struct radeon_fence *b)
282{
283	if (!a) {
284		return false;
285	}
286
287	if (!b) {
288		return true;
289	}
290
291	KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belongs to different rings"));
292
293	return a->seq < b->seq;
294}
295
296/*
297 * Tiling registers
298 */
/* Tracks which BO currently owns a hardware surface (tiling) register. */
struct radeon_surface_reg {
	struct radeon_bo *bo;	/* owning BO, or NULL if the register is free -- TODO confirm */
};
302
303#define RADEON_GEM_MAX_SURFACES 8
304
305/*
306 * TTM.
307 */
/* TTM memory manager state for the device. */
struct radeon_mman {
	struct ttm_bo_global_ref        bo_global_ref;	/* global TTM BO state reference */
	struct drm_global_reference	mem_global_ref;	/* global TTM memory state reference */
	struct ttm_bo_device		bdev;		/* TTM buffer object device */
	bool				mem_global_referenced;	/* mem_global_ref taken? */
	bool				initialized;
};
315
316/* bo virtual address in a specific vm */
/* Mapping of a BO into one VM's address space ([soffset, eoffset)). */
struct radeon_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;	/* entry in the bo's va list */
	uint64_t			soffset;	/* start offset in the VM */
	uint64_t			eoffset;	/* end offset in the VM */
	uint32_t			flags;		/* mapping flags (e.g. read/write) -- TODO confirm */
	bool				valid;		/* page table entries up to date? */
	unsigned			ref_count;	/* number of users of this mapping */

	/* protected by vm mutex */
	struct list_head		vm_list;	/* entry in the vm's va list */

	/* constant after initialization */
	struct radeon_vm		*vm;		/* VM this mapping belongs to */
	struct radeon_bo		*bo;		/* BO that is mapped */
};
333
/* Radeon buffer object: a GEM object backed by a TTM buffer object. */
struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head		list;		/* entry in radeon_gem.objects */
	/* Protected by tbo.reserved */
	u32				placements[3];	/* allowed placement domains */
	struct ttm_placement		placement;	/* current TTM placement request */
	struct ttm_buffer_object	tbo;		/* underlying TTM object */
	struct ttm_bo_kmap_obj		kmap;		/* kernel mapping, valid when kptr != NULL */
	unsigned			pin_count;	/* >0 while pinned in place */
	void				*kptr;		/* CPU pointer from kmap, or NULL */
	u32				tiling_flags;	/* hw tiling configuration */
	u32				pitch;		/* surface pitch for tiling */
	int				surface_reg;	/* index of owned surface reg, or negative -- TODO confirm */
	/* list of all virtual address to which this bo
	 * is associated to
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct radeon_device		*rdev;
	struct drm_gem_object		gem_base;	/* embedded GEM object (see gem_to_radeon_bo) */

	struct ttm_bo_kmap_obj dma_buf_vmap;	/* mapping for dma-buf vmap */
	int vmapping_count;			/* number of outstanding dma-buf vmaps */
};
358#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
359
/* Per-BO entry on a validation list (used during command submission). */
struct radeon_bo_list {
	struct ttm_validate_buffer tv;	/* TTM validation bookkeeping */
	struct radeon_bo	*bo;
	uint64_t		gpu_offset;	/* GPU address after validation */
	unsigned		rdomain;	/* domains the BO may be read from */
	unsigned		wdomain;	/* domains the BO may be written in */
	u32			tiling_flags;
};
368
/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset+object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
/* State for the sub-allocator described above (one backing BO). */
struct radeon_sa_manager {
	struct cv		wq;	/* waiters blocked until space frees up */
	struct sx		wq_lock;	/* lock paired with wq */
	struct radeon_bo	*bo;	/* backing buffer object */
	struct list_head	*hole;	/* points at the current hole in olist */
	struct list_head	flist[RADEON_NUM_RINGS];	/* per-ring lists of fenced allocations */
	struct list_head	olist;	/* all sub-allocations in offset order */
	unsigned		size;	/* total size managed */
	uint64_t		gpu_addr;	/* GPU address of the backing BO */
	void			*cpu_ptr;	/* CPU mapping of the backing BO */
	uint32_t		domain;	/* memory domain of the backing BO */
};
404
405struct radeon_sa_bo;
406
407/* sub-allocation buffer */
/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		olist;	/* entry in manager->olist (offset order) */
	struct list_head		flist;	/* entry in manager->flist (fenced allocations) */
	struct radeon_sa_manager	*manager;	/* owning manager */
	unsigned			soffset;	/* start offset inside the manager's BO */
	unsigned			eoffset;	/* end offset inside the manager's BO */
	struct radeon_fence		*fence;	/* fence after which the range may be reused */
};
416
417/*
418 * GEM objects.
419 */
/* Device-wide list of GEM objects, guarded by its own mutex. */
struct radeon_gem {
	struct sx		mutex;	/* protects objects */
	struct list_head	objects;	/* all radeon_bo list entries */
};
424
425int radeon_gem_init(struct radeon_device *rdev);
426void radeon_gem_fini(struct radeon_device *rdev);
427int radeon_gem_object_create(struct radeon_device *rdev, int size,
428				int alignment, int initial_domain,
429				bool discardable, bool kernel,
430				struct drm_gem_object **obj);
431
432int radeon_mode_dumb_create(struct drm_file *file_priv,
433			    struct drm_device *dev,
434			    struct drm_mode_create_dumb *args);
435int radeon_mode_dumb_mmap(struct drm_file *filp,
436			  struct drm_device *dev,
437			  uint32_t handle, uint64_t *offset_p);
438int radeon_mode_dumb_destroy(struct drm_file *file_priv,
439			     struct drm_device *dev,
440			     uint32_t handle);
441
442/*
443 * Semaphores.
444 */
445/* everything here is constant */
/* everything here is constant */
struct radeon_semaphore {
	struct radeon_sa_bo		*sa_bo;	/* sub-allocation holding the semaphore value */
	signed				waiters;	/* signal/wait balance -- TODO confirm sign convention */
	uint64_t			gpu_addr;	/* GPU address of the semaphore value */
};
451
452int radeon_semaphore_create(struct radeon_device *rdev,
453			    struct radeon_semaphore **semaphore);
454void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
455				  struct radeon_semaphore *semaphore);
456void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
457				struct radeon_semaphore *semaphore);
458int radeon_semaphore_sync_rings(struct radeon_device *rdev,
459				struct radeon_semaphore *semaphore,
460				int signaler, int waiter);
461void radeon_semaphore_free(struct radeon_device *rdev,
462			   struct radeon_semaphore **semaphore,
463			   struct radeon_fence *fence);
464
465/*
466 * GART structures, functions & helpers
467 */
468struct radeon_mc;
469
470#define RADEON_GPU_PAGE_SIZE 4096
471#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
472#define RADEON_GPU_PAGE_SHIFT 12
473#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
474
/* GART (GPU address remapping table) state; table lives in RAM or VRAM. */
struct radeon_gart {
	drm_dma_handle_t		*dmah;		/* DMA handle when table is in system RAM */
	dma_addr_t			table_addr;	/* bus address of the table */
	struct radeon_bo		*robj;		/* BO when table is in VRAM */
	void				*ptr;		/* CPU pointer to the table */
	unsigned			num_gpu_pages;	/* aperture size in GPU pages */
	unsigned			num_cpu_pages;	/* aperture size in CPU pages */
	unsigned			table_size;	/* table size in bytes */
	vm_page_t			*pages;		/* currently bound CPU pages */
	dma_addr_t			*pages_addr;	/* bus addresses of the bound pages */
	bool				ready;		/* true once the GART is usable */
};
487
488int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
489void radeon_gart_table_ram_free(struct radeon_device *rdev);
490int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
491void radeon_gart_table_vram_free(struct radeon_device *rdev);
492int radeon_gart_table_vram_pin(struct radeon_device *rdev);
493void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
494int radeon_gart_init(struct radeon_device *rdev);
495void radeon_gart_fini(struct radeon_device *rdev);
496void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
497			int pages);
498int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
499		     int pages, vm_page_t *pagelist,
500		     dma_addr_t *dma_addr);
501void radeon_gart_restore(struct radeon_device *rdev);
502
503
504/*
505 * GPU MC structures, functions & helpers
506 */
/* Memory controller layout: apertures plus GTT/VRAM ranges in GPU space. */
struct radeon_mc {
	resource_size_t		aper_size;	/* PCI aperture size */
	resource_size_t		aper_base;	/* PCI aperture base */
	resource_size_t		agp_base;	/* AGP aperture base */
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;	/* VRAM size as programmed into the MC */
	u64			visible_vram_size;	/* CPU-visible portion of VRAM */
	u64			gtt_size;	/* GTT aperture size */
	u64			gtt_start;	/* GTT start in GPU address space */
	u64			gtt_end;	/* GTT end in GPU address space */
	u64			vram_start;	/* VRAM start in GPU address space */
	u64			vram_end;	/* VRAM end in GPU address space */
	unsigned		vram_width;	/* memory bus width in bits */
	u64			real_vram_size;	/* actual VRAM size */
	int			vram_mtrr;	/* MTRR cookie for the VRAM aperture -- TODO confirm */
	bool			vram_is_ddr;
	bool			igp_sideport_enabled;	/* IGP sideport memory present */
	u64                     gtt_base_align;	/* required alignment of gtt_start */
};
527
528bool radeon_combios_sideport_present(struct radeon_device *rdev);
529bool radeon_atombios_sideport_present(struct radeon_device *rdev);
530
531/*
532 * GPU scratch registers structures, functions & helpers
533 */
/* Allocator for GPU scratch registers (see radeon_scratch_get/free). */
struct radeon_scratch {
	unsigned		num_reg;	/* number of usable scratch registers */
	uint32_t                reg_base;	/* register offset of the first scratch reg */
	bool			free[32];	/* per-register free flag */
	uint32_t		reg[32];	/* register offsets handed out to users */
};
540
541int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
542void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
543
544
545/*
546 * IRQS.
547 */
548
/* Deferred work for completing a page flip and unpinning the old BO. */
struct radeon_unpin_work {
	struct task work;		/* taskqueue entry */
	struct radeon_device *rdev;
	int crtc_id;			/* CRTC being flipped */
	struct radeon_fence *fence;	/* fence to wait on before the flip -- TODO confirm */
	struct drm_pending_vblank_event *event;	/* userspace completion event, may be NULL */
	struct radeon_bo *old_rbo;	/* framebuffer BO to unpin after the flip */
	u64 new_crtc_base;		/* GPU address of the new scanout buffer */
};
558
/* Snapshot of r5xx-class interrupt status registers. */
struct r500_irq_stat_regs {
	u32 disp_int;		/* display interrupt status */
	u32 hdmi0_status;	/* HDMI block 0 status */
};
563
/* Snapshot of r6xx/r7xx interrupt status registers. */
struct r600_irq_stat_regs {
	u32 disp_int;		/* display interrupt status */
	u32 disp_int_cont;	/* display interrupt status, continued */
	u32 disp_int_cont2;	/* display interrupt status, continued */
	u32 d1grph_int;		/* display 1 graphics interrupt */
	u32 d2grph_int;		/* display 2 graphics interrupt */
	u32 hdmi0_status;	/* HDMI block 0 status */
	u32 hdmi1_status;	/* HDMI block 1 status */
};
573
/* Snapshot of evergreen+ interrupt status registers (6 CRTCs, 6 AFMT blocks). */
struct evergreen_irq_stat_regs {
	u32 disp_int;		/* display interrupt status */
	u32 disp_int_cont;	/* display interrupt status, continued */
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 d1grph_int;		/* per-display graphics interrupts */
	u32 d2grph_int;
	u32 d3grph_int;
	u32 d4grph_int;
	u32 d5grph_int;
	u32 d6grph_int;
	u32 afmt_status1;	/* per-block audio/HDMI (AFMT) status */
	u32 afmt_status2;
	u32 afmt_status3;
	u32 afmt_status4;
	u32 afmt_status5;
	u32 afmt_status6;
};
594
/* Generation-specific interrupt status snapshot; only one member is valid. */
union radeon_irq_stat_regs {
	struct r500_irq_stat_regs r500;
	struct r600_irq_stat_regs r600;
	struct evergreen_irq_stat_regs evergreen;
};
600
601#define RADEON_MAX_HPD_PINS 6
602#define RADEON_MAX_CRTCS 6
603#define RADEON_MAX_AFMT_BLOCKS 6
604
/* Interrupt state: which sources are enabled plus the latest status snapshot. */
struct radeon_irq {
	bool				installed;	/* irq handler registered */
	struct mtx			lock;		/* protects enable state */
	atomic_t			ring_int[RADEON_NUM_RINGS];	/* per-ring irq refcounts */
	bool				crtc_vblank_int[RADEON_MAX_CRTCS];	/* vblank irq enabled per CRTC */
	atomic_t			pflip[RADEON_MAX_CRTCS];	/* page-flip irq refcounts per CRTC */
	wait_queue_head_t		vblank_queue;	/* waiters for vblank events */
	bool				hpd[RADEON_MAX_HPD_PINS];	/* hotplug irq enabled per pin */
	bool				afmt[RADEON_MAX_AFMT_BLOCKS];	/* audio irq enabled per AFMT block */
	union radeon_irq_stat_regs	stat_regs;	/* last read status registers */
};
616
617int radeon_irq_kms_init(struct radeon_device *rdev);
618void radeon_irq_kms_fini(struct radeon_device *rdev);
619void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
620void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
621void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
622void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
623void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
624void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
625void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
626void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
627
628/*
629 * CP & rings.
630 */
631
/* Indirect buffer: command stream built by the CPU and executed by a ring. */
struct radeon_ib {
	struct radeon_sa_bo		*sa_bo;		/* sub-allocation holding the commands */
	uint32_t			length_dw;	/* number of valid dwords */
	uint64_t			gpu_addr;	/* GPU address of the commands */
	uint32_t			*ptr;		/* CPU pointer to the commands */
	int				ring;		/* ring the IB is scheduled on */
	struct radeon_fence		*fence;		/* fence emitted after execution */
	struct radeon_vm		*vm;		/* VM the IB executes in, or NULL */
	bool				is_const_ib;	/* true for a SI constant-engine IB */
	struct radeon_fence		*sync_to[RADEON_NUM_RINGS];	/* fences to wait for first */
	struct radeon_semaphore		*semaphore;	/* semaphore used for the sync */
};
644
/* Ring buffer state for one hardware ring (CP, compute, DMA, ...). */
struct radeon_ring {
	struct radeon_bo	*ring_obj;	/* BO backing the ring buffer */
	volatile uint32_t	*ring;		/* CPU mapping of the ring */
	unsigned		rptr;		/* last known hw read pointer */
	unsigned		rptr_offs;	/* writeback offset for the read pointer */
	unsigned		rptr_reg;	/* read-pointer register */
	unsigned		rptr_save_reg;	/* scratch reg used to save rptr -- TODO confirm */
	u64			next_rptr_gpu_addr;	/* GPU address for next rptr writeback */
	volatile u32		*next_rptr_cpu_addr;	/* CPU view of the same */
	unsigned		wptr;		/* current write pointer */
	unsigned		wptr_old;	/* write pointer before the open block (for undo) */
	unsigned		wptr_reg;	/* write-pointer register */
	unsigned		ring_size;	/* ring size in bytes */
	unsigned		ring_free_dw;	/* free space in dwords */
	int			count_dw;	/* dwords remaining in the current allocation */
	unsigned long		last_activity;	/* timestamp of last observed progress */
	unsigned		last_rptr;	/* rptr at last lockup check */
	uint64_t		gpu_addr;	/* GPU address of the ring buffer */
	uint32_t		align_mask;	/* command alignment mask */
	uint32_t		ptr_mask;	/* wrap mask for rptr/wptr */
	bool			ready;		/* ring tested and usable */
	u32			ptr_reg_shift;	/* shift applied to pointer register values */
	u32			ptr_reg_mask;	/* mask applied to pointer register values */
	u32			nop;		/* NOP packet used for padding */
	u32			idx;		/* ring index (RADEON_RING_TYPE_*) */
	u64			last_semaphore_signal_addr;	/* debug: last semaphore signal address */
	u64			last_semaphore_wait_addr;	/* debug: last semaphore wait address */
};
673
674/*
675 * VM
676 */
677
678/* maximum number of VMIDs */
679#define RADEON_NUM_VM	16
680
681/* defines number of bits in page table versus page directory,
682 * a page is 4KB so we have 12 bits offset, 9 bits in the page
683 * table and the remaining 19 bits are in the page directory */
684#define RADEON_VM_BLOCK_SIZE   9
685
686/* number of entries in page table */
687#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
688
/* Per-client GPU virtual address space (two-level page directory/table). */
struct radeon_vm {
	struct list_head		list;	/* entry in the manager's LRU list */
	struct list_head		va;	/* radeon_bo_va mappings in this VM */
	unsigned			id;	/* assigned VMID, 0..RADEON_NUM_VM-1 -- TODO confirm range */

	/* contains the page directory */
	struct radeon_sa_bo		*page_directory;
	uint64_t			pd_gpu_addr;	/* GPU address of the page directory */

	/* array of page tables, one for each page directory entry */
	struct radeon_sa_bo		**page_tables;

	struct sx			mutex;
	/* last fence for cs using this vm */
	struct radeon_fence		*fence;
	/* last flush or NULL if we still need to flush */
	struct radeon_fence		*last_flush;
};
707
/* Device-wide VM bookkeeping: VMID assignment and page-table storage. */
struct radeon_vm_manager {
	struct sx			lock;
	struct list_head		lru_vm;		/* VMs in least-recently-used order */
	struct radeon_fence		*active[RADEON_NUM_VM];	/* last fence using each VMID */
	struct radeon_sa_manager	sa_manager;	/* storage for page directories/tables */
	uint32_t			max_pfn;	/* highest page frame number mappable */
	/* number of VMIDs */
	unsigned			nvm;
	/* vram base address for page table entry  */
	u64				vram_base_offset;
	/* is vm enabled? */
	bool				enabled;
};
721
722/*
723 * file private structure
724 */
/* Per-open-file private data: each client gets its own VM. */
struct radeon_fpriv {
	struct radeon_vm		vm;
};
728
729/*
730 * R6xx+ IH ring
731 */
/* R6xx+ interrupt handler (IH) ring: hw writes events, driver reads them. */
struct r600_ih {
	struct radeon_bo	*ring_obj;	/* BO backing the IH ring */
	volatile uint32_t	*ring;		/* CPU mapping of the ring */
	unsigned		rptr;		/* driver read pointer */
	unsigned		ring_size;	/* ring size in bytes */
	uint64_t		gpu_addr;	/* GPU address of the ring */
	uint32_t		ptr_mask;	/* wrap mask for the pointers */
	atomic_t		lock;		/* guards concurrent IH processing */
	bool                    enabled;
};
742
/* Generation-specific callbacks used to emit blit commands on the CP. */
struct r600_blit_cp_primitives {
	/* bind the destination surface as render target */
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	/* emit a surface-sync packet flushing caches for the given range */
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size,
				    u64 mc_addr);
	/* load the blit vertex/pixel shaders */
	void (*set_shaders)(struct radeon_device *rdev);
	/* bind the vertex buffer */
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	/* bind the source surface as texture */
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	/* program the scissor rectangle */
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	/* kick off the draw */
	void (*draw_auto)(struct radeon_device *rdev);
	/* emit the default GPU state for blits */
	void (*set_default_state)(struct radeon_device *rdev);
};
759
/* State for the r600+ shader-based blit engine. */
struct r600_blit {
	struct radeon_bo	*shader_obj;	/* BO holding the blit shaders */
	struct r600_blit_cp_primitives primitives;	/* generation-specific emit callbacks */
	int max_dim;		/* maximum blit rectangle dimension */
	int ring_size_common;	/* ring dwords needed once per blit */
	int ring_size_per_loop;	/* ring dwords needed per rectangle */
	u64 shader_gpu_addr;	/* GPU address of shader_obj */
	u32 vs_offset, ps_offset;	/* vertex/pixel shader offsets in shader_obj */
	u32 state_offset;	/* default-state commands offset in shader_obj */
	u32 state_len;		/* default-state commands length */
};
771
772/*
773 * SI RLC stuff
774 */
/* SI RLC (run list controller) buffers. */
struct si_rlc {
	/* for power gating */
	struct radeon_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	/* for clear state */
	struct radeon_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
};
783
784int radeon_ib_get(struct radeon_device *rdev, int ring,
785		  struct radeon_ib *ib, struct radeon_vm *vm,
786		  unsigned size);
787void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
788int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
789		       struct radeon_ib *const_ib);
790int radeon_ib_pool_init(struct radeon_device *rdev);
791void radeon_ib_pool_fini(struct radeon_device *rdev);
792int radeon_ib_ring_tests(struct radeon_device *rdev);
793/* Ring access between begin & end cannot sleep */
794bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
795				      struct radeon_ring *ring);
796void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
797int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
798int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
799void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
800void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
801void radeon_ring_undo(struct radeon_ring *ring);
802void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
803int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
804void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
805void radeon_ring_lockup_update(struct radeon_ring *ring);
806bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
807unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
808			    uint32_t **data);
809int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
810			unsigned size, uint32_t *data);
811int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
812		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
813		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
814void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
815
816
817/* r600 async dma */
818void r600_dma_stop(struct radeon_device *rdev);
819int r600_dma_resume(struct radeon_device *rdev);
820void r600_dma_fini(struct radeon_device *rdev);
821
822void cayman_dma_stop(struct radeon_device *rdev);
823int cayman_dma_resume(struct radeon_device *rdev);
824void cayman_dma_fini(struct radeon_device *rdev);
825
826/*
827 * CS.
828 */
/* One relocation from a CS ioctl: maps a GEM handle to a validated BO. */
struct radeon_cs_reloc {
	struct drm_gem_object		*gobj;	/* GEM object looked up from handle */
	struct radeon_bo		*robj;	/* corresponding radeon BO */
	struct radeon_bo_list		lobj;	/* validation list entry */
	uint32_t			handle;	/* userspace GEM handle */
	uint32_t			flags;	/* relocation flags from userspace */
};
836
/* One chunk of a CS ioctl; large chunks are paged in from userspace. */
struct radeon_cs_chunk {
	uint32_t		chunk_id;	/* RADEON_CHUNK_ID_* type */
	uint32_t		length_dw;	/* chunk length in dwords */
	int			kpage_idx[2];	/* which user pages the two kernel pages hold */
	uint32_t		*kpage[2];	/* two-page kernel cache of user data */
	uint32_t		*kdata;		/* fully copied-in data, or NULL if paged */
	void __user		*user_ptr;	/* userspace pointer to the chunk data */
	int			last_copied_page;	/* paging state */
	int			last_page_index;	/* index of the chunk's final page */
};
847
/* Full state for parsing and validating one command submission. */
struct radeon_cs_parser {
	device_t		dev;
	struct radeon_device	*rdev;
	struct drm_file		*filp;		/* submitting client */
	/* chunks */
	unsigned		nchunks;
	struct radeon_cs_chunk	*chunks;
	uint64_t		*chunks_array;	/* userspace chunk pointers */
	/* IB */
	unsigned		idx;		/* current parse position in dwords */
	/* relocations */
	unsigned		nrelocs;
	struct radeon_cs_reloc	*relocs;
	struct radeon_cs_reloc	**relocs_ptr;	/* relocs indexed by packet order */
	struct list_head	validated;	/* BOs queued for validation */
	unsigned		dma_reloc_idx;	/* next reloc for DMA submissions */
	/* indices of various chunks */
	int			chunk_ib_idx;
	int			chunk_relocs_idx;
	int			chunk_flags_idx;
	int			chunk_const_ib_idx;
	struct radeon_ib	ib;		/* IB being built/validated */
	struct radeon_ib	const_ib;	/* SI constant-engine IB, if any */
	void			*track;		/* ASIC-specific state tracker */
	unsigned		family;		/* chip family being parsed for */
	int			parser_error;	/* first error encountered, 0 if none */
	u32			cs_flags;	/* flags from the flags chunk */
	u32			ring;		/* destination ring */
	s32			priority;	/* submission priority */
};
878
879extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
880extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
881
/* Decoded header of one command packet inside an IB. */
struct radeon_cs_packet {
	unsigned	idx;		/* dword index of the packet in the IB */
	unsigned	type;		/* packet type (0/2/3) */
	unsigned	reg;		/* first register written (type 0) */
	unsigned	opcode;		/* opcode (type 3) */
	int		count;		/* payload dword count */
	unsigned	one_reg_wr;	/* type 0: write count dwords to one register */
};
890
891typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
892				      struct radeon_cs_packet *pkt,
893				      unsigned idx, unsigned reg);
894typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
895				      struct radeon_cs_packet *pkt);
896
897
898/*
899 * AGP
900 */
901int radeon_agp_init(struct radeon_device *rdev);
902void radeon_agp_resume(struct radeon_device *rdev);
903void radeon_agp_suspend(struct radeon_device *rdev);
904void radeon_agp_fini(struct radeon_device *rdev);
905
906
907/*
908 * Writeback
909 */
/* Writeback buffer: hw writes status (rptrs, fences) here instead of registers. */
struct radeon_wb {
	struct radeon_bo	*wb_obj;	/* BO backing the writeback page */
	volatile uint32_t	*wb;		/* CPU mapping of the writeback page */
	uint64_t		gpu_addr;	/* GPU address of the writeback page */
	bool                    enabled;	/* writeback in use */
	bool                    use_event;	/* use EOP events for fences -- TODO confirm */
};
917
918#define RADEON_WB_SCRATCH_OFFSET 0
919#define RADEON_WB_RING0_NEXT_RPTR 256
920#define RADEON_WB_CP_RPTR_OFFSET 1024
921#define RADEON_WB_CP1_RPTR_OFFSET 1280
922#define RADEON_WB_CP2_RPTR_OFFSET 1536
923#define R600_WB_DMA_RPTR_OFFSET   1792
924#define R600_WB_IH_WPTR_OFFSET   2048
925#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
926#define R600_WB_EVENT_OFFSET     3072
927
/**
 * struct radeon_pm - power management data
 * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk:  sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk:    system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk:    ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk:          	GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth:   current bandwidth needs
 *
 * It keeps track of various data needed to make power management decisions.
 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 * The equation between gpu/memory clock and available bandwidth is hw
 * dependent (type of memory, bus size, efficiency, ...)
 */
947
/* How power management is driven: static profiles or dynamic reclocking. */
enum radeon_pm_method {
	PM_METHOD_PROFILE,
	PM_METHOD_DYNPM,
};
952
/* Current state of the dynamic power management engine. */
enum radeon_dynpm_state {
	DYNPM_STATE_DISABLED,
	DYNPM_STATE_MINIMUM,
	DYNPM_STATE_PAUSED,
	DYNPM_STATE_ACTIVE,
	DYNPM_STATE_SUSPENDED,
};
/* Next reclocking action requested of the dynamic PM engine. */
enum radeon_dynpm_action {
	DYNPM_ACTION_NONE,
	DYNPM_ACTION_MINIMUM,
	DYNPM_ACTION_DOWNCLOCK,
	DYNPM_ACTION_UPCLOCK,
	DYNPM_ACTION_DEFAULT
};
967
/* Mechanism used to control the chip voltage (see struct radeon_voltage). */
enum radeon_voltage_type {
	VOLTAGE_NONE = 0,
	VOLTAGE_GPIO,
	VOLTAGE_VDDC,
	VOLTAGE_SW
};
974
/* Standardized classification of a vbios power state. */
enum radeon_pm_state_type {
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
};
982
/* User-selectable power profile when using PM_METHOD_PROFILE. */
enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_MID,
	PM_PROFILE_HIGH,
};
990
991#define PM_PROFILE_DEFAULT_IDX 0
992#define PM_PROFILE_LOW_SH_IDX  1
993#define PM_PROFILE_MID_SH_IDX  2
994#define PM_PROFILE_HIGH_SH_IDX 3
995#define PM_PROFILE_LOW_MH_IDX  4
996#define PM_PROFILE_MID_MH_IDX  5
997#define PM_PROFILE_HIGH_MH_IDX 6
998#define PM_PROFILE_MAX         7
999
/* Power state / clock mode indices to use for one profile (dpms on vs. off). */
struct radeon_pm_profile {
	int dpms_off_ps_idx;	/* power state index when displays are off */
	int dpms_on_ps_idx;	/* power state index when displays are on */
	int dpms_off_cm_idx;	/* clock mode index when displays are off */
	int dpms_on_cm_idx;	/* clock mode index when displays are on */
};
1006
/* Which internal thermal sensor the chip has (selects the get_temp helper). */
enum radeon_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
};
1016
/* Voltage setting for one clock mode; which fields apply depends on type. */
struct radeon_voltage {
	enum radeon_voltage_type type;
	/* gpio voltage */
	struct radeon_gpio_rec gpio;
	u32 delay; /* delay in usec from voltage drop to sclk change */
	bool active_high; /* voltage drop is active when bit is high */
	/* VDDC voltage */
	u8 vddc_id; /* index into vddc voltage table */
	u8 vddci_id; /* index into vddci voltage table */
	bool vddci_enabled;
	/* r6xx+ sw */
	u16 voltage;
	/* evergreen+ vddci */
	u16 vddci;
};
1032
1033/* clock mode flags */
1034#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
1035
/* One clock mode (engine/memory clock + voltage combination) within a
 * power state. */
struct radeon_pm_clock_info {
	/* memory clock */
	u32 mclk;
	/* engine clock */
	u32 sclk;
	/* voltage info */
	struct radeon_voltage voltage;
	/* standardized clock flags (RADEON_PM_MODE_*) */
	u32 flags;
};
1046
1047/* state flags */
1048#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
1049
/* A power state: an array of clock modes plus classification flags
 * parsed from the vbios. */
struct radeon_power_state {
	enum radeon_pm_state_type type;
	struct radeon_pm_clock_info *clock_info;	/* array of clock modes */
	/* number of valid clock modes in this power state */
	int num_clock_modes;
	struct radeon_pm_clock_info *default_clock_mode;
	/* standardized state flags (RADEON_PM_STATE_*) */
	u32 flags;
	u32 misc; /* vbios specific flags */
	u32 misc2; /* vbios specific flags */
	int pcie_lanes; /* pcie lanes */
};
1062
1063/*
1064 * Some modes are overclocked by very low value, accept them
1065 */
1066#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
1067
/*
 * Aggregate power-management state: display bandwidth bookkeeping, the
 * power-state table parsed from the vbios, and the dynpm / profile /
 * thermal bookkeeping that drives reclocking.
 */
struct radeon_pm {
	struct sx		mutex;
	/* write locked while reprogramming mclk */
	struct sx		mclk_lock;
	u32			active_crtcs;
	int			active_crtc_count;
	int			req_vblank;
	bool			vblank_sync;
	fixed20_12		max_bandwidth;
	fixed20_12		igp_sideport_mclk;
	fixed20_12		igp_system_mclk;
	fixed20_12		igp_ht_link_clk;
	fixed20_12		igp_ht_link_width;
	fixed20_12		k8_bandwidth;
	fixed20_12		sideport_bandwidth;
	fixed20_12		ht_bandwidth;
	fixed20_12		core_bandwidth;
	fixed20_12		sclk;
	fixed20_12		mclk;
	fixed20_12		needed_bandwidth;
	struct radeon_power_state *power_state;
	/* number of valid power states */
	int                     num_power_states;
	int                     current_power_state_index;
	int                     current_clock_mode_index;
	int                     requested_power_state_index;
	int                     requested_clock_mode_index;
	int                     default_power_state_index;
	u32                     current_sclk;
	u32                     current_mclk;
	u16                     current_vddc;
	u16                     current_vddci;
	u32                     default_sclk;
	u32                     default_mclk;
	u16                     default_vddc;
	u16                     default_vddci;
	struct radeon_i2c_chan *i2c_bus;
	/* selected pm method */
	enum radeon_pm_method     pm_method;
	/* dynpm power management */
#ifdef FREEBSD_WIP
	struct delayed_work	dynpm_idle_work;
#endif /* FREEBSD_WIP */
	enum radeon_dynpm_state	dynpm_state;
	enum radeon_dynpm_action	dynpm_planned_action;
	unsigned long		dynpm_action_timeout;
	bool                    dynpm_can_upclock;
	bool                    dynpm_can_downclock;
	/* profile-based power management */
	enum radeon_pm_profile_type profile;
	int                     profile_index;
	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
	/* internal thermal controller on rv6xx+ */
	enum radeon_int_thermal_type int_thermal_type;
#ifdef FREEBSD_WIP
	struct device	        *int_hwmon_dev;
#endif /* FREEBSD_WIP */
};
1126
1127int radeon_pm_get_type_index(struct radeon_device *rdev,
1128			     enum radeon_pm_state_type ps_type,
1129			     int instance);
1130
/* Parameters of the current audio stream, as tracked by the r600 audio
 * code (see audio_status in struct radeon_device). */
struct r600_audio {
	int			channels;
	int			rate;
	int			bits_per_sample;
	u8			status_bits;
	u8			category_code;
};
1138
1139/*
1140 * Benchmarking
1141 */
1142void radeon_benchmark(struct radeon_device *rdev, int test_number);
1143
1144
1145/*
1146 * Testing
1147 */
1148void radeon_test_moves(struct radeon_device *rdev);
1149void radeon_test_ring_sync(struct radeon_device *rdev,
1150			   struct radeon_ring *cpA,
1151			   struct radeon_ring *cpB);
1152void radeon_test_syncing(struct radeon_device *rdev);
1153
1154
1155/*
1156 * Debugfs
1157 */
/* One component's registered debugfs file list
 * (populated via radeon_debugfs_add_files()). */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};
1162
1163int radeon_debugfs_add_files(struct radeon_device *rdev,
1164			     struct drm_info_list *files,
1165			     unsigned nfiles);
1166int radeon_debugfs_fence_init(struct radeon_device *rdev);
1167
1168
1169/*
1170 * ASIC specific functions.
1171 */
/*
 * Table of per-family hardware entry points, set up during asic
 * initialization (see radeon_asic_init()) and invoked through the
 * radeon_* dispatch macros further down in this file.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform special
	 * operation on specific ioctl. For instance on wait idle some hw
	 * might want to perform and HDP flush through MMIO as it seems that
	 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
	 * through ring.
	 */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
	/* check if 3D engine is idle */
	bool (*gui_idle)(struct radeon_device *rdev);
	/* wait for mc_idle */
	int (*mc_wait_for_idle)(struct radeon_device *rdev);
	/* gart */
	struct {
		void (*tlb_flush)(struct radeon_device *rdev);
		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	} gart;
	/* virtual memory (page-table) handling */
	struct {
		int (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);

		u32 pt_ring_index;
		void (*set_page)(struct radeon_device *rdev, uint64_t pe,
				 uint64_t addr, unsigned count,
				 uint32_t incr, uint32_t flags);
	} vm;
	/* ring specific callbacks */
	struct {
		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
				       struct radeon_semaphore *semaphore, bool emit_wait);
		int (*cs_parse)(struct radeon_cs_parser *p);
		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
	} ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		irqreturn_t (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
		/* set backlight level */
		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
		/* get backlight level */
		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence **fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
				       uint32_t tiling_flags, uint32_t pitch,
				       uint32_t offset, uint32_t obj_size);
		void (*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	} pm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};
1295
1296/*
1297 * Asic structures
1298 */
/* r100-family ASIC data: CS register-safety bitmap and saved HDP_CNTL. */
struct r100_asic {
	const unsigned		*reg_safe_bm;
	unsigned		reg_safe_bm_size;
	u32			hdp_cntl;
};
1304
/* r300-family ASIC data: like r100 plus a resync scratch register. */
struct r300_asic {
	const unsigned		*reg_safe_bm;
	unsigned		reg_safe_bm_size;
	u32			resync_scratch;
	u32			hdp_cntl;
};
1311
/* r600-family gfx configuration limits and tiling parameters. */
struct r600_asic {
	unsigned		max_pipes;
	unsigned		max_tile_pipes;
	unsigned		max_simds;
	unsigned		max_backends;
	unsigned		max_gprs;
	unsigned		max_threads;
	unsigned		max_stack_entries;
	unsigned		max_hw_contexts;
	unsigned		max_gs_threads;
	unsigned		sx_max_export_size;
	unsigned		sx_max_export_pos_size;
	unsigned		sx_max_export_smx_size;
	unsigned		sq_num_cf_insts;
	unsigned		tiling_nbanks;
	unsigned		tiling_npipes;
	unsigned		tiling_group_size;
	unsigned		tile_config;
	unsigned		backend_map;
};
1332
/* rv770-family gfx configuration limits and tiling parameters. */
struct rv770_asic {
	unsigned		max_pipes;
	unsigned		max_tile_pipes;
	unsigned		max_simds;
	unsigned		max_backends;
	unsigned		max_gprs;
	unsigned		max_threads;
	unsigned		max_stack_entries;
	unsigned		max_hw_contexts;
	unsigned		max_gs_threads;
	unsigned		sx_max_export_size;
	unsigned		sx_max_export_pos_size;
	unsigned		sx_max_export_smx_size;
	unsigned		sq_num_cf_insts;
	unsigned		sx_num_of_sets;
	unsigned		sc_prim_fifo_size;
	unsigned		sc_hiz_tile_fifo_size;
	unsigned		sc_earlyz_tile_fifo_fize;	/* sic: "size"; field name kept as-is for compatibility */
	unsigned		tiling_nbanks;
	unsigned		tiling_npipes;
	unsigned		tiling_group_size;
	unsigned		tile_config;
	unsigned		backend_map;
};
1357
/* evergreen-family gfx configuration limits and tiling parameters. */
struct evergreen_asic {
	unsigned num_ses;
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
};
1383
/* cayman-family gfx configuration: hardware maximums followed by the
 * actual (possibly harvested) counts detected on this board. */
struct cayman_asic {
	unsigned max_shader_engines;
	unsigned max_pipes_per_simd;
	unsigned max_tile_pipes;
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_gs_threads;
	unsigned max_stack_entries;
	unsigned sx_num_of_sets;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned max_hw_contexts;
	unsigned sq_num_cf_insts;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_shader_engines;
	unsigned num_shader_pipes_per_simd;
	unsigned num_tile_pipes;
	unsigned num_simds_per_se;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
};
1421
/* si-family gfx configuration: hardware maximums followed by the
 * actual counts detected on this board. */
struct si_asic {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
};
1450
/* Per-family configuration data; presumably only the member matching
 * rdev->family is populated -- see the per-family init code. */
union radeon_asic_config {
	struct r300_asic	r300;
	struct r100_asic	r100;
	struct r600_asic	r600;
	struct rv770_asic	rv770;
	struct evergreen_asic	evergreen;
	struct cayman_asic	cayman;
	struct si_asic		si;
};
1460
1461/*
1462 * asic initizalization from radeon_asic.c
1463 */
1464int radeon_asic_init(struct radeon_device *rdev);
1465
1466
1467/*
1468 * IOCTL.
1469 */
1470int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
1471			  struct drm_file *filp);
1472int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
1473			    struct drm_file *filp);
1474int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
1475			 struct drm_file *file_priv);
1476int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
1477			   struct drm_file *file_priv);
1478int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1479			    struct drm_file *file_priv);
1480int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
1481			   struct drm_file *file_priv);
1482int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1483				struct drm_file *filp);
1484int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
1485			  struct drm_file *filp);
1486int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
1487			  struct drm_file *filp);
1488int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1489			      struct drm_file *filp);
1490int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
1491			  struct drm_file *filp);
1492int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1493int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1494				struct drm_file *filp);
1495int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1496				struct drm_file *filp);
1497
1498/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
	struct radeon_bo		*robj;		/* backing buffer object */
	volatile uint32_t		*ptr;		/* CPU mapping of the scratch page */
	u64				gpu_addr;	/* GPU address of the scratch page */
};
1504
1505/*
1506 * ACPI
1507 */
/* How ACPI ATIF event notifications are delivered. */
struct radeon_atif_notification_cfg {
	bool enabled;
	int command_code;
};
1512
/* ATIF notification capability flags, one per event type. */
struct radeon_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};
1524
/* ATIF function-presence flags, one per supported ACPI method. */
struct radeon_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};
1537
/* State for the ACPI ATIF interface. */
struct radeon_atif {
	struct radeon_atif_notifications notifications;
	struct radeon_atif_functions functions;
	struct radeon_atif_notification_cfg notification_cfg;
	struct radeon_encoder *encoder_for_bl;	/* encoder used for backlight control */
};
1544
/* ATCS function-presence flags, one per supported ACPI method. */
struct radeon_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};
1551
/* State for the ACPI ATCS interface. */
struct radeon_atcs {
	struct radeon_atcs_functions functions;
};
1555
1556/*
1557 * Core structure, functions and helpers.
1558 */
1559typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
1560typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
1561
/*
 * Main driver-private structure: one instance per radeon device, tying
 * together register access, memory management, rings, interrupts,
 * power management and display state.
 */
struct radeon_device {
	device_t			dev;
	struct drm_device		*ddev;
	struct sx			exclusive_lock;
	/* ASIC */
	union radeon_asic_config	config;
	enum radeon_family		family;
	unsigned long			flags;
	int				usec_timeout;
	enum radeon_pll_errata		pll_errata;
	int				num_gb_pipes;
	int				num_z_pipes;
	int				disp_priority;
	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	struct radeon_bo		*stollen_vga_memory;	/* sic: "stolen"; name kept as-is */
	/* Register mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	/* protects concurrent MM_INDEX/DATA based register access */
	struct mtx			mmio_idx_lock;
	int				rmmio_rid;
	struct resource			*rmmio;
	radeon_rreg_t			mc_rreg;
	radeon_wreg_t			mc_wreg;
	radeon_rreg_t			pll_rreg;
	radeon_wreg_t			pll_wreg;
	uint32_t                        pcie_reg_mask;
	radeon_rreg_t			pciep_rreg;
	radeon_wreg_t			pciep_wreg;
	/* io port */
	int				rio_rid;
	struct resource			*rio_mem;
	resource_size_t			rio_mem_size;
	struct radeon_clock             clock;
	struct radeon_mc		mc;
	struct radeon_gart		gart;
	struct radeon_mode_info		mode_info;
	struct radeon_scratch		scratch;
	struct radeon_mman		mman;
	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
	struct cv			fence_queue;
	struct mtx			fence_queue_mtx;
	struct sx			ring_lock;
	struct radeon_ring		ring[RADEON_NUM_RINGS];
	bool				ib_pool_ready;
	struct radeon_sa_manager	ring_tmp_bo;
	struct radeon_irq		irq;
	struct radeon_asic		*asic;
	struct radeon_gem		gem;
	struct radeon_pm		pm;
	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct radeon_wb		wb;
	struct radeon_dummy_page	dummy_page;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	bool				fictitious_range_registered;
	bool				fictitious_agp_range_registered;
	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware *me_fw;	/* all family ME firmware */
	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
	const struct firmware *mc_fw;	/* NI MC firmware */
	const struct firmware *ce_fw;	/* SI CE firmware */
	struct r600_blit r600_blit;
	struct r600_vram_scratch vram_scratch;
	int msi_enabled; /* msi enabled */
	struct r600_ih ih; /* r6/700 interrupt ring */
	struct si_rlc rlc;
	struct taskqueue *tq;
	struct task hotplug_work;
	struct task audio_work;
	int num_crtc; /* number of crtcs */
	struct sx dc_hw_i2c_mutex; /* display controller hw i2c mutex */
	bool audio_enabled;
	struct r600_audio audio_status; /* audio stuff */
#if defined(CONFIG_ACPI)
	struct {
		ACPI_HANDLE		handle;
		ACPI_NOTIFY_HANDLER	notifier_call;
	} acpi;
#endif
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file *hyperz_filp;
	struct drm_file *cmask_filp;
	/* i2c buses */
	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
	/* debugfs */
	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
	unsigned 		debugfs_count;
	/* virtual memory */
	struct radeon_vm_manager	vm_manager;
	struct sx			gpu_clock_mutex;
	/* ACPI interface */
	struct radeon_atif		atif;
	struct radeon_atcs		atcs;
};
1663
1664int radeon_device_init(struct radeon_device *rdev,
1665		       struct drm_device *ddev,
1666		       uint32_t flags);
1667void radeon_device_fini(struct radeon_device *rdev);
1668int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1669
1670uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
1671		      bool always_indirect);
1672void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
1673		  bool always_indirect);
1674u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1675void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1676
1677/*
1678 * Cast helper
1679 */
1680#define to_radeon_fence(p) ((struct radeon_fence *)(p))
1681
1682/*
1683 * Registers read & write functions.
1684 */
/* 8/16-bit MMIO accessors go straight through bus_space. */
#define RREG8(reg) bus_read_1((rdev->rmmio), (reg))
#define WREG8(reg, v) bus_write_1((rdev->rmmio), (reg), v)
#define RREG16(reg) bus_read_2((rdev->rmmio), (reg))
#define WREG16(reg, v) bus_write_2((rdev->rmmio), (reg), v)
/* 32-bit accessors; the _IDX variants force indirect MM_INDEX/MM_DATA
 * access (see r100_mm_rreg/r100_mm_wreg). */
#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
/* Fixed: r100_mm_rreg() takes an always_indirect argument (see its
 * prototype above); without it this macro failed to compile wherever
 * it was instantiated.  DREG32_SYS below already passed false. */
#define DREG32(reg) DRM_INFO("REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
/* NOTE(review): REG_GET expands identically to REG_SET (it also shifts
 * left); a true "get" would be (((v) & FIELD##_MASK) >> FIELD##_SHIFT).
 * Left unchanged because existing callers may depend on the current
 * expansion -- confirm before fixing. */
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
/* Read-modify-write: bits set in 'mask' are preserved from the current
 * register value; the remaining bits are taken from 'val'. */
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1721
1722/*
1723 * Indirect registers accessor
1724 */
1725static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
1726{
1727	uint32_t r;
1728
1729	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1730	r = RREG32(RADEON_PCIE_DATA);
1731	return r;
1732}
1733
/* Indirect write: select the PCIE register through the index port,
 * then write the value via the data port. */
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}
1739
1740void r100_pll_errata_after_index(struct radeon_device *rdev);
1741
1742
1743/*
1744 * ASICs helpers.
1745 */
/* Chip-identification helpers, by PCI device ID or chip family. */
#define ASIC_IS_RN50(rdev) ((rdev->ddev->pci_device == 0x515e) || \
			    (rdev->ddev->pci_device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
		(rdev->family == CHIP_RV200) || \
		(rdev->family == CHIP_RS100) || \
		(rdev->family == CHIP_RS200) || \
		(rdev->family == CHIP_RV250) || \
		(rdev->family == CHIP_RV280) || \
		(rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
		(rdev->family == CHIP_RV350) ||			\
		(rdev->family == CHIP_R350)  ||			\
		(rdev->family == CHIP_RV380) ||			\
		(rdev->family == CHIP_R420)  ||			\
		(rdev->family == CHIP_R423)  ||			\
		(rdev->family == CHIP_RV410) ||			\
		(rdev->family == CHIP_RS400) ||			\
		(rdev->family == CHIP_RS480))
/* presumably dual-GPU ("X2") boards, by PCI device ID -- confirm */
#define ASIC_IS_X2(rdev) ((rdev->ddev->pci_device == 0x9441) || \
		(rdev->ddev->pci_device == 0x9443) || \
		(rdev->ddev->pci_device == 0x944B) || \
		(rdev->ddev->pci_device == 0x9506) || \
		(rdev->ddev->pci_device == 0x9509) || \
		(rdev->ddev->pci_device == 0x950F) || \
		(rdev->ddev->pci_device == 0x689C) || \
		(rdev->ddev->pci_device == 0x689D))
/* display-block (AVIVO / DCE) generation checks, by family threshold */
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
			    (rdev->family == CHIP_RS690)  ||	\
			    (rdev->family == CHIP_RS740)  ||	\
			    (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
			     (rdev->flags & RADEON_IS_IGP))
1786
1787/*
1788 * BIOS helpers.
1789 */
/* Read a byte/word/dword (little-endian) from the shadowed video BIOS
 * image (rdev->bios). */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
1793
1794int radeon_combios_init(struct radeon_device *rdev);
1795void radeon_combios_fini(struct radeon_device *rdev);
1796int radeon_atombios_init(struct radeon_device *rdev);
1797void radeon_atombios_fini(struct radeon_device *rdev);
1798
1799
1800/*
1801 * RING helpers.
1802 */
1803#if DRM_DEBUG_CODE == 0
1804static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
1805{
1806	ring->ring[ring->wptr++] = v;
1807	ring->wptr &= ring->ptr_mask;
1808	ring->count_dw--;
1809	ring->ring_free_dw--;
1810}
1811#else
1812/* With debugging this is just too big to inline */
1813void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1814#endif
1815
1816/*
1817 * ASICs macro.
1818 */
/* Thin dispatch wrappers around the per-family function table in
 * rdev->asic (see struct radeon_asic above). */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
1876
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
/* KMS modesetting bring-up / teardown */
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
/* WB = write-back (see the TODO list at the top of this header) */
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
/* Place VRAM / GTT apertures inside the memory-controller address space */
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1900
1901/*
1902 * vm
1903 */
1904int radeon_vm_manager_init(struct radeon_device *rdev);
1905void radeon_vm_manager_fini(struct radeon_device *rdev);
1906void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1907void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1908int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
1909void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
1910struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
1911				       struct radeon_vm *vm, int ring);
1912void radeon_vm_fence(struct radeon_device *rdev,
1913		     struct radeon_vm *vm,
1914		     struct radeon_fence *fence);
1915uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
1916int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1917			    struct radeon_vm *vm,
1918			    struct radeon_bo *bo,
1919			    struct ttm_mem_reg *mem);
1920void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1921			     struct radeon_bo *bo);
1922struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
1923				       struct radeon_bo *bo);
1924struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
1925				      struct radeon_vm *vm,
1926				      struct radeon_bo *bo);
1927int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1928			  struct radeon_bo_va *bo_va,
1929			  uint64_t offset,
1930			  uint32_t flags);
1931int radeon_vm_bo_rmv(struct radeon_device *rdev,
1932		     struct radeon_bo_va *bo_va);
1933
/* audio */
/*
 * (void *, int) signature matches a FreeBSD taskqueue(9) handler —
 * NOTE(review): presumably a deferred HDMI audio update; confirm at
 * the enqueue site in r600 audio code.
 */
void r600_audio_update_hdmi(void *arg, int pending);

/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);
1942
1943/*
1944 * r600 cs checking helper
1945 */
1946unsigned r600_mip_minify(unsigned size, unsigned level);
1947bool r600_fmt_is_valid_color(u32 format);
1948bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
1949int r600_fmt_get_blocksize(u32 format);
1950int r600_fmt_get_nblocksx(u32 format, u32 w);
1951int r600_fmt_get_nblocksy(u32 format, u32 h);
1952
1953/*
1954 * r600 functions used by radeon_encoder.c
1955 */
1956struct radeon_hdmi_acr {
1957	u32 clock;
1958
1959	int n_32khz;
1960	int cts_32khz;
1961
1962	int n_44_1khz;
1963	int cts_44_1khz;
1964
1965	int n_48khz;
1966	int cts_48khz;
1967
1968};
1969
/* Look up the ACR (N/CTS) parameters for the given clock */
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);

extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* Compute the render-backend remap for r6xx given pipe/RB configuration */
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
				     u32 tiling_pipe_num,
				     u32 max_rb_num,
				     u32 total_max_rb_num,
				     u32 enabled_rb_mask);
1980
1981/*
1982 * evergreen functions used by radeon_encoder.c
1983 */
1984
1985extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1986
1987extern int ni_init_microcode(struct radeon_device *rdev);
1988extern int ni_mc_load_microcode(struct radeon_device *rdev);
1989extern void ni_fini_microcode(struct radeon_device *rdev);
1990
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
#else
/* No-op stubs so callers never need their own CONFIG_ACPI conditionals */
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
1999
/*
 * Prototypes added by @dumbbell for the FreeBSD port; grouped by the
 * source file that defines each function.
 */

/* atombios_encoders.c */
void	radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
	    struct drm_connector *drm_connector);
void	radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
	    uint32_t supported_device, u16 caps);

/* radeon_atombios.c */
bool	radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
	    struct drm_display_mode *mode);

/* radeon_combios.c */
void	radeon_combios_connected_scratch_regs(struct drm_connector *connector,
	    struct drm_encoder *encoder, bool connected);

/* radeon_connectors.c */
void	radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
	    struct drm_encoder *encoder, bool connected);
void	radeon_add_legacy_connector(struct drm_device *dev,
	    uint32_t connector_id,
	    uint32_t supported_device,
	    int connector_type,
	    struct radeon_i2c_bus_rec *i2c_bus,
	    uint16_t connector_object_id,
	    struct radeon_hpd *hpd);
void	radeon_add_atom_connector(struct drm_device *dev,
	    uint32_t connector_id,
	    uint32_t supported_device,
	    int connector_type,
	    struct radeon_i2c_bus_rec *i2c_bus,
	    uint32_t igp_lane_info,
	    uint16_t connector_object_id,
	    struct radeon_hpd *hpd,
	    struct radeon_router *router);

/* radeon_encoders.c */
uint32_t	radeon_get_encoder_enum(struct drm_device *dev,
		    uint32_t supported_device, uint8_t dac);
void		radeon_link_encoder_connector(struct drm_device *dev);

/* radeon_legacy_encoders.c */
void	radeon_add_legacy_encoder(struct drm_device *dev,
	    uint32_t encoder_enum, uint32_t supported_device);
void	radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
	    struct drm_connector *drm_connector);

/* radeon_pm.c */
void	radeon_pm_acpi_event_handler(struct radeon_device *rdev);

/* radeon_ttm.c */
int	radeon_ttm_init(struct radeon_device *rdev);
void	radeon_ttm_fini(struct radeon_device *rdev);

/* radeon_fb.c */
/* device_t is FreeBSD newbus — this accessor is port-specific */
struct fb_info *	radeon_fb_helper_getinfo(device_t kdev);

/* r600.c */
/* IH = interrupt handler ring */
int r600_ih_ring_alloc(struct radeon_device *rdev);
void r600_ih_ring_fini(struct radeon_device *rdev);
2060
2061#include "radeon_object.h"
2062
2063#endif
2064