69 70#include <sys/param.h> 71#include <sys/systm.h> 72#include <sys/linker.h> 73#include <sys/firmware.h> 74 75#include <contrib/dev/acpica/include/acpi.h> 76#include <dev/acpica/acpivar.h> 77 78#include <dev/drm2/ttm/ttm_bo_api.h> 79#include <dev/drm2/ttm/ttm_bo_driver.h> 80#include <dev/drm2/ttm/ttm_placement.h> 81#include <dev/drm2/ttm/ttm_module.h> 82#include <dev/drm2/ttm/ttm_execbuf_util.h> 83 84#include "radeon_family.h" 85#include "radeon_mode.h" 86#include "radeon_reg.h" 87 88/* 89 * Modules parameters. 90 */ 91extern int radeon_no_wb; 92extern int radeon_modeset; 93extern int radeon_dynclks; 94extern int radeon_r4xx_atom; 95extern int radeon_agpmode; 96extern int radeon_vram_limit; 97extern int radeon_gart_size; 98extern int radeon_benchmarking; 99extern int radeon_testing; 100extern int radeon_connector_table; 101extern int radeon_tv; 102extern int radeon_audio; 103extern int radeon_disp_priority; 104extern int radeon_hw_i2c; 105extern int radeon_pcie_gen2; 106extern int radeon_msi; 107extern int radeon_lockup_timeout; 108 109/* 110 * Copy from radeon_drv.h so we don't have to include both and have conflicting 111 * symbol; 112 */ 113#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 114#define RADEON_FENCE_JIFFIES_TIMEOUT (DRM_HZ / 2) 115/* RADEON_IB_POOL_SIZE must be a power of 2 */ 116#define RADEON_IB_POOL_SIZE 16 117#define RADEON_DEBUGFS_MAX_COMPONENTS 32 118#define RADEONFB_CONN_LIMIT 4 119#define RADEON_BIOS_NUM_SCRATCH 8 120 121/* max number of rings */ 122#define RADEON_NUM_RINGS 5 123 124/* fence seq are set to this number when signaled */ 125#define RADEON_FENCE_SIGNALED_SEQ 0LL 126 127/* internal ring indices */ 128/* r1xx+ has gfx CP ring */ 129#define RADEON_RING_TYPE_GFX_INDEX 0 130 131/* cayman has 2 compute CP rings */ 132#define CAYMAN_RING_TYPE_CP1_INDEX 1 133#define CAYMAN_RING_TYPE_CP2_INDEX 2 134 135/* R600+ has an async dma ring */ 136#define R600_RING_TYPE_DMA_INDEX 3 137/* cayman add a second async dma ring */ 138#define 
CAYMAN_RING_TYPE_DMA1_INDEX 4 139 140/* hardcode those limit for now */ 141#define RADEON_VA_IB_OFFSET (1 << 20) 142#define RADEON_VA_RESERVED_SIZE (8 << 20) 143#define RADEON_IB_VM_MAX_SIZE (64 << 10) 144 145/* reset flags */ 146#define RADEON_RESET_GFX (1 << 0) 147#define RADEON_RESET_COMPUTE (1 << 1) 148#define RADEON_RESET_DMA (1 << 2) 149 150/* 151 * Errata workarounds. 152 */ 153enum radeon_pll_errata { 154 CHIP_ERRATA_R300_CG = 0x00000001, 155 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002, 156 CHIP_ERRATA_PLL_DELAY = 0x00000004 157}; 158 159 160struct radeon_device; 161 162 163/* 164 * BIOS. 165 */ 166bool radeon_get_bios(struct radeon_device *rdev); 167 168/* 169 * Dummy page 170 */ 171struct radeon_dummy_page { 172 drm_dma_handle_t *dmah; 173 dma_addr_t addr; 174}; 175int radeon_dummy_page_init(struct radeon_device *rdev); 176void radeon_dummy_page_fini(struct radeon_device *rdev); 177 178 179/* 180 * Clocks 181 */ 182struct radeon_clock { 183 struct radeon_pll p1pll; 184 struct radeon_pll p2pll; 185 struct radeon_pll dcpll; 186 struct radeon_pll spll; 187 struct radeon_pll mpll; 188 /* 10 Khz units */ 189 uint32_t default_mclk; 190 uint32_t default_sclk; 191 uint32_t default_dispclk; 192 uint32_t dp_extclk; 193 uint32_t max_pixel_clock; 194}; 195 196/* 197 * Power management 198 */ 199int radeon_pm_init(struct radeon_device *rdev); 200void radeon_pm_fini(struct radeon_device *rdev); 201void radeon_pm_compute_clocks(struct radeon_device *rdev); 202void radeon_pm_suspend(struct radeon_device *rdev); 203void radeon_pm_resume(struct radeon_device *rdev); 204void radeon_combios_get_power_modes(struct radeon_device *rdev); 205void radeon_atombios_get_power_modes(struct radeon_device *rdev); 206void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 207void rs690_pm_info(struct radeon_device *rdev); 208extern int rv6xx_get_temp(struct radeon_device *rdev); 209extern int rv770_get_temp(struct radeon_device *rdev); 210extern int 
evergreen_get_temp(struct radeon_device *rdev); 211extern int sumo_get_temp(struct radeon_device *rdev); 212extern int si_get_temp(struct radeon_device *rdev); 213extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, 214 unsigned *bankh, unsigned *mtaspect, 215 unsigned *tile_split); 216 217/* 218 * Fences. 219 */ 220struct radeon_fence_driver { 221 uint32_t scratch_reg; 222 uint64_t gpu_addr; 223 volatile uint32_t *cpu_addr; 224 /* sync_seq is protected by ring emission lock */ 225 uint64_t sync_seq[RADEON_NUM_RINGS]; 226 atomic64_t last_seq; 227 unsigned long last_activity; 228 bool initialized; 229}; 230 231struct radeon_fence { 232 struct radeon_device *rdev; 233 unsigned int kref; 234 /* protected by radeon_fence.lock */ 235 uint64_t seq; 236 /* RB, DMA, etc. */ 237 unsigned ring; 238}; 239 240int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 241int radeon_fence_driver_init(struct radeon_device *rdev); 242void radeon_fence_driver_fini(struct radeon_device *rdev); 243void radeon_fence_driver_force_completion(struct radeon_device *rdev); 244int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); 245void radeon_fence_process(struct radeon_device *rdev, int ring); 246bool radeon_fence_signaled(struct radeon_fence *fence); 247int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 248int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); 249int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); 250int radeon_fence_wait_any(struct radeon_device *rdev, 251 struct radeon_fence **fences, 252 bool intr); 253struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); 254void radeon_fence_unref(struct radeon_fence **fence); 255unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring); 256bool radeon_fence_need_sync(struct radeon_fence *fence, int ring); 257void radeon_fence_note_sync(struct radeon_fence *fence, int 
ring); 258static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a, 259 struct radeon_fence *b) 260{ 261 if (!a) { 262 return b; 263 } 264 265 if (!b) { 266 return a; 267 } 268 269 KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belongs to different rings")); 270 271 if (a->seq > b->seq) { 272 return a; 273 } else { 274 return b; 275 } 276} 277 278static inline bool radeon_fence_is_earlier(struct radeon_fence *a, 279 struct radeon_fence *b) 280{ 281 if (!a) { 282 return false; 283 } 284 285 if (!b) { 286 return true; 287 } 288 289 KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belongs to different rings")); 290 291 return a->seq < b->seq; 292} 293 294/* 295 * Tiling registers 296 */ 297struct radeon_surface_reg { 298 struct radeon_bo *bo; 299}; 300 301#define RADEON_GEM_MAX_SURFACES 8 302 303/* 304 * TTM. 305 */ 306struct radeon_mman { 307 struct ttm_bo_global_ref bo_global_ref; 308 struct drm_global_reference mem_global_ref; 309 struct ttm_bo_device bdev; 310 bool mem_global_referenced; 311 bool initialized; 312}; 313 314/* bo virtual address in a specific vm */ 315struct radeon_bo_va { 316 /* protected by bo being reserved */ 317 struct list_head bo_list; 318 uint64_t soffset; 319 uint64_t eoffset; 320 uint32_t flags; 321 bool valid; 322 unsigned ref_count; 323 324 /* protected by vm mutex */ 325 struct list_head vm_list; 326 327 /* constant after initialization */ 328 struct radeon_vm *vm; 329 struct radeon_bo *bo; 330}; 331 332struct radeon_bo { 333 /* Protected by gem.mutex */ 334 struct list_head list; 335 /* Protected by tbo.reserved */ 336 u32 placements[3]; 337 struct ttm_placement placement; 338 struct ttm_buffer_object tbo; 339 struct ttm_bo_kmap_obj kmap; 340 unsigned pin_count; 341 void *kptr; 342 u32 tiling_flags; 343 u32 pitch; 344 int surface_reg; 345 /* list of all virtual address to which this bo 346 * is associated to 347 */ 348 struct list_head va; 349 /* Constant after initialization */ 350 struct radeon_device *rdev; 351 
struct drm_gem_object gem_base; 352 353 struct ttm_bo_kmap_obj dma_buf_vmap; 354 int vmapping_count; 355}; 356#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 357 358struct radeon_bo_list { 359 struct ttm_validate_buffer tv; 360 struct radeon_bo *bo; 361 uint64_t gpu_offset; 362 unsigned rdomain; 363 unsigned wdomain; 364 u32 tiling_flags; 365}; 366 367/* sub-allocation manager, it has to be protected by another lock. 368 * By conception this is an helper for other part of the driver 369 * like the indirect buffer or semaphore, which both have their 370 * locking. 371 * 372 * Principe is simple, we keep a list of sub allocation in offset 373 * order (first entry has offset == 0, last entry has the highest 374 * offset). 375 * 376 * When allocating new object we first check if there is room at 377 * the end total_size - (last_object_offset + last_object_size) >= 378 * alloc_size. If so we allocate new object there. 379 * 380 * When there is not enough room at the end, we start waiting for 381 * each sub object until we reach object_offset+object_size >= 382 * alloc_size, this object then become the sub object we return. 383 * 384 * Alignment can't be bigger than page size. 385 * 386 * Hole are not considered for allocation to keep things simple. 387 * Assumption is that there won't be hole (all object on same 388 * alignment). 389 */ 390struct radeon_sa_manager { 391 struct cv wq; 392 struct sx wq_lock; 393 struct radeon_bo *bo; 394 struct list_head *hole; 395 struct list_head flist[RADEON_NUM_RINGS]; 396 struct list_head olist; 397 unsigned size; 398 uint64_t gpu_addr; 399 void *cpu_ptr; 400 uint32_t domain; 401}; 402 403struct radeon_sa_bo; 404 405/* sub-allocation buffer */ 406struct radeon_sa_bo { 407 struct list_head olist; 408 struct list_head flist; 409 struct radeon_sa_manager *manager; 410 unsigned soffset; 411 unsigned eoffset; 412 struct radeon_fence *fence; 413}; 414 415/* 416 * GEM objects. 
417 */ 418struct radeon_gem { 419 struct sx mutex; 420 struct list_head objects; 421}; 422 423int radeon_gem_init(struct radeon_device *rdev); 424void radeon_gem_fini(struct radeon_device *rdev); 425int radeon_gem_object_create(struct radeon_device *rdev, int size, 426 int alignment, int initial_domain, 427 bool discardable, bool kernel, 428 struct drm_gem_object **obj); 429 430int radeon_mode_dumb_create(struct drm_file *file_priv, 431 struct drm_device *dev, 432 struct drm_mode_create_dumb *args); 433int radeon_mode_dumb_mmap(struct drm_file *filp, 434 struct drm_device *dev, 435 uint32_t handle, uint64_t *offset_p); 436int radeon_mode_dumb_destroy(struct drm_file *file_priv, 437 struct drm_device *dev, 438 uint32_t handle); 439 440/* 441 * Semaphores. 442 */ 443/* everything here is constant */ 444struct radeon_semaphore { 445 struct radeon_sa_bo *sa_bo; 446 signed waiters; 447 uint64_t gpu_addr; 448}; 449 450int radeon_semaphore_create(struct radeon_device *rdev, 451 struct radeon_semaphore **semaphore); 452void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 453 struct radeon_semaphore *semaphore); 454void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 455 struct radeon_semaphore *semaphore); 456int radeon_semaphore_sync_rings(struct radeon_device *rdev, 457 struct radeon_semaphore *semaphore, 458 int signaler, int waiter); 459void radeon_semaphore_free(struct radeon_device *rdev, 460 struct radeon_semaphore **semaphore, 461 struct radeon_fence *fence); 462 463/* 464 * GART structures, functions & helpers 465 */ 466struct radeon_mc; 467 468#define RADEON_GPU_PAGE_SIZE 4096 469#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 470#define RADEON_GPU_PAGE_SHIFT 12 471#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK) 472 473struct radeon_gart { 474 drm_dma_handle_t *dmah; 475 dma_addr_t table_addr; 476 struct radeon_bo *robj; 477 void *ptr; 478 unsigned num_gpu_pages; 479 unsigned 
num_cpu_pages; 480 unsigned table_size; 481 vm_page_t *pages; 482 dma_addr_t *pages_addr; 483 bool ready; 484}; 485 486int radeon_gart_table_ram_alloc(struct radeon_device *rdev); 487void radeon_gart_table_ram_free(struct radeon_device *rdev); 488int radeon_gart_table_vram_alloc(struct radeon_device *rdev); 489void radeon_gart_table_vram_free(struct radeon_device *rdev); 490int radeon_gart_table_vram_pin(struct radeon_device *rdev); 491void radeon_gart_table_vram_unpin(struct radeon_device *rdev); 492int radeon_gart_init(struct radeon_device *rdev); 493void radeon_gart_fini(struct radeon_device *rdev); 494void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, 495 int pages); 496int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, 497 int pages, vm_page_t *pagelist, 498 dma_addr_t *dma_addr); 499void radeon_gart_restore(struct radeon_device *rdev); 500 501 502/* 503 * GPU MC structures, functions & helpers 504 */ 505struct radeon_mc { 506 resource_size_t aper_size; 507 resource_size_t aper_base; 508 resource_size_t agp_base; 509 /* for some chips with <= 32MB we need to lie 510 * about vram size near mc fb location */ 511 u64 mc_vram_size; 512 u64 visible_vram_size; 513 u64 gtt_size; 514 u64 gtt_start; 515 u64 gtt_end; 516 u64 vram_start; 517 u64 vram_end; 518 unsigned vram_width; 519 u64 real_vram_size; 520 int vram_mtrr; 521 bool vram_is_ddr; 522 bool igp_sideport_enabled; 523 u64 gtt_base_align; 524}; 525 526bool radeon_combios_sideport_present(struct radeon_device *rdev); 527bool radeon_atombios_sideport_present(struct radeon_device *rdev); 528 529/* 530 * GPU scratch registers structures, functions & helpers 531 */ 532struct radeon_scratch { 533 unsigned num_reg; 534 uint32_t reg_base; 535 bool free[32]; 536 uint32_t reg[32]; 537}; 538 539int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg); 540void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); 541 542 543/* 544 * IRQS. 
545 */ 546 547struct radeon_unpin_work { 548 struct task work; 549 struct radeon_device *rdev; 550 int crtc_id; 551 struct radeon_fence *fence; 552 struct drm_pending_vblank_event *event; 553 struct radeon_bo *old_rbo; 554 u64 new_crtc_base; 555}; 556 557struct r500_irq_stat_regs { 558 u32 disp_int; 559 u32 hdmi0_status; 560}; 561 562struct r600_irq_stat_regs { 563 u32 disp_int; 564 u32 disp_int_cont; 565 u32 disp_int_cont2; 566 u32 d1grph_int; 567 u32 d2grph_int; 568 u32 hdmi0_status; 569 u32 hdmi1_status; 570}; 571 572struct evergreen_irq_stat_regs { 573 u32 disp_int; 574 u32 disp_int_cont; 575 u32 disp_int_cont2; 576 u32 disp_int_cont3; 577 u32 disp_int_cont4; 578 u32 disp_int_cont5; 579 u32 d1grph_int; 580 u32 d2grph_int; 581 u32 d3grph_int; 582 u32 d4grph_int; 583 u32 d5grph_int; 584 u32 d6grph_int; 585 u32 afmt_status1; 586 u32 afmt_status2; 587 u32 afmt_status3; 588 u32 afmt_status4; 589 u32 afmt_status5; 590 u32 afmt_status6; 591}; 592 593union radeon_irq_stat_regs { 594 struct r500_irq_stat_regs r500; 595 struct r600_irq_stat_regs r600; 596 struct evergreen_irq_stat_regs evergreen; 597}; 598 599#define RADEON_MAX_HPD_PINS 6 600#define RADEON_MAX_CRTCS 6 601#define RADEON_MAX_AFMT_BLOCKS 6 602 603struct radeon_irq { 604 bool installed; 605 struct mtx lock; 606 atomic_t ring_int[RADEON_NUM_RINGS]; 607 bool crtc_vblank_int[RADEON_MAX_CRTCS]; 608 atomic_t pflip[RADEON_MAX_CRTCS]; 609 wait_queue_head_t vblank_queue; 610 bool hpd[RADEON_MAX_HPD_PINS]; 611 bool afmt[RADEON_MAX_AFMT_BLOCKS]; 612 union radeon_irq_stat_regs stat_regs; 613}; 614 615int radeon_irq_kms_init(struct radeon_device *rdev); 616void radeon_irq_kms_fini(struct radeon_device *rdev); 617void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); 618void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); 619void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); 620void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); 621void 
radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block); 622void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block); 623void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask); 624void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); 625 626/* 627 * CP & rings. 628 */ 629 630struct radeon_ib { 631 struct radeon_sa_bo *sa_bo; 632 uint32_t length_dw; 633 uint64_t gpu_addr; 634 uint32_t *ptr; 635 int ring; 636 struct radeon_fence *fence; 637 struct radeon_vm *vm; 638 bool is_const_ib; 639 struct radeon_fence *sync_to[RADEON_NUM_RINGS]; 640 struct radeon_semaphore *semaphore; 641}; 642 643struct radeon_ring { 644 struct radeon_bo *ring_obj; 645 volatile uint32_t *ring; 646 unsigned rptr; 647 unsigned rptr_offs; 648 unsigned rptr_reg; 649 unsigned rptr_save_reg; 650 u64 next_rptr_gpu_addr; 651 volatile u32 *next_rptr_cpu_addr; 652 unsigned wptr; 653 unsigned wptr_old; 654 unsigned wptr_reg; 655 unsigned ring_size; 656 unsigned ring_free_dw; 657 int count_dw; 658 unsigned long last_activity; 659 unsigned last_rptr; 660 uint64_t gpu_addr; 661 uint32_t align_mask; 662 uint32_t ptr_mask; 663 bool ready; 664 u32 ptr_reg_shift; 665 u32 ptr_reg_mask; 666 u32 nop; 667 u32 idx; 668 u64 last_semaphore_signal_addr; 669 u64 last_semaphore_wait_addr; 670}; 671 672/* 673 * VM 674 */ 675 676/* maximum number of VMIDs */ 677#define RADEON_NUM_VM 16 678 679/* defines number of bits in page table versus page directory, 680 * a page is 4KB so we have 12 bits offset, 9 bits in the page 681 * table and the remaining 19 bits are in the page directory */ 682#define RADEON_VM_BLOCK_SIZE 9 683 684/* number of entries in page table */ 685#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) 686 687struct radeon_vm { 688 struct list_head list; 689 struct list_head va; 690 unsigned id; 691 692 /* contains the page directory */ 693 struct radeon_sa_bo *page_directory; 694 uint64_t pd_gpu_addr; 695 696 /* array of page 
tables, one for each page directory entry */ 697 struct radeon_sa_bo **page_tables; 698 699 struct sx mutex; 700 /* last fence for cs using this vm */ 701 struct radeon_fence *fence; 702 /* last flush or NULL if we still need to flush */ 703 struct radeon_fence *last_flush; 704}; 705 706struct radeon_vm_manager { 707 struct sx lock; 708 struct list_head lru_vm; 709 struct radeon_fence *active[RADEON_NUM_VM]; 710 struct radeon_sa_manager sa_manager; 711 uint32_t max_pfn; 712 /* number of VMIDs */ 713 unsigned nvm; 714 /* vram base address for page table entry */ 715 u64 vram_base_offset; 716 /* is vm enabled? */ 717 bool enabled; 718}; 719 720/* 721 * file private structure 722 */ 723struct radeon_fpriv { 724 struct radeon_vm vm; 725}; 726 727/* 728 * R6xx+ IH ring 729 */ 730struct r600_ih { 731 struct radeon_bo *ring_obj; 732 volatile uint32_t *ring; 733 unsigned rptr; 734 unsigned ring_size; 735 uint64_t gpu_addr; 736 uint32_t ptr_mask; 737 atomic_t lock; 738 bool enabled; 739}; 740 741struct r600_blit_cp_primitives { 742 void (*set_render_target)(struct radeon_device *rdev, int format, 743 int w, int h, u64 gpu_addr); 744 void (*cp_set_surface_sync)(struct radeon_device *rdev, 745 u32 sync_type, u32 size, 746 u64 mc_addr); 747 void (*set_shaders)(struct radeon_device *rdev); 748 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); 749 void (*set_tex_resource)(struct radeon_device *rdev, 750 int format, int w, int h, int pitch, 751 u64 gpu_addr, u32 size); 752 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, 753 int x2, int y2); 754 void (*draw_auto)(struct radeon_device *rdev); 755 void (*set_default_state)(struct radeon_device *rdev); 756}; 757 758struct r600_blit { 759 struct radeon_bo *shader_obj; 760 struct r600_blit_cp_primitives primitives; 761 int max_dim; 762 int ring_size_common; 763 int ring_size_per_loop; 764 u64 shader_gpu_addr; 765 u32 vs_offset, ps_offset; 766 u32 state_offset; 767 u32 state_len; 768}; 769 770/* 771 * 
SI RLC stuff 772 */ 773struct si_rlc { 774 /* for power gating */ 775 struct radeon_bo *save_restore_obj; 776 uint64_t save_restore_gpu_addr; 777 /* for clear state */ 778 struct radeon_bo *clear_state_obj; 779 uint64_t clear_state_gpu_addr; 780}; 781 782int radeon_ib_get(struct radeon_device *rdev, int ring, 783 struct radeon_ib *ib, struct radeon_vm *vm, 784 unsigned size); 785void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 786int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 787 struct radeon_ib *const_ib); 788int radeon_ib_pool_init(struct radeon_device *rdev); 789void radeon_ib_pool_fini(struct radeon_device *rdev); 790int radeon_ib_ring_tests(struct radeon_device *rdev); 791/* Ring access between begin & end cannot sleep */ 792bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, 793 struct radeon_ring *ring); 794void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); 795int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 796int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 797void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); 798void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); 799void radeon_ring_undo(struct radeon_ring *ring); 800void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); 801int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 802void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring); 803void radeon_ring_lockup_update(struct radeon_ring *ring); 804bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 805unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, 806 uint32_t **data); 807int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, 808 unsigned size, uint32_t *data); 
809int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, 810 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, 811 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop); 812void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); 813 814 815/* r600 async dma */ 816void r600_dma_stop(struct radeon_device *rdev); 817int r600_dma_resume(struct radeon_device *rdev); 818void r600_dma_fini(struct radeon_device *rdev); 819 820void cayman_dma_stop(struct radeon_device *rdev); 821int cayman_dma_resume(struct radeon_device *rdev); 822void cayman_dma_fini(struct radeon_device *rdev); 823 824/* 825 * CS. 826 */ 827struct radeon_cs_reloc { 828 struct drm_gem_object *gobj; 829 struct radeon_bo *robj; 830 struct radeon_bo_list lobj; 831 uint32_t handle; 832 uint32_t flags; 833}; 834 835struct radeon_cs_chunk { 836 uint32_t chunk_id; 837 uint32_t length_dw; 838 int kpage_idx[2]; 839 uint32_t *kpage[2]; 840 uint32_t *kdata; 841 void __user *user_ptr; 842 int last_copied_page; 843 int last_page_index; 844}; 845 846struct radeon_cs_parser { 847 device_t dev; 848 struct radeon_device *rdev; 849 struct drm_file *filp; 850 /* chunks */ 851 unsigned nchunks; 852 struct radeon_cs_chunk *chunks; 853 uint64_t *chunks_array; 854 /* IB */ 855 unsigned idx; 856 /* relocations */ 857 unsigned nrelocs; 858 struct radeon_cs_reloc *relocs; 859 struct radeon_cs_reloc **relocs_ptr; 860 struct list_head validated; 861 unsigned dma_reloc_idx; 862 /* indices of various chunks */ 863 int chunk_ib_idx; 864 int chunk_relocs_idx; 865 int chunk_flags_idx; 866 int chunk_const_ib_idx; 867 struct radeon_ib ib; 868 struct radeon_ib const_ib; 869 void *track; 870 unsigned family; 871 int parser_error; 872 u32 cs_flags; 873 u32 ring; 874 s32 priority; 875}; 876 877extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); 878extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx); 879 880struct radeon_cs_packet { 881 unsigned idx; 882 
unsigned type; 883 unsigned reg; 884 unsigned opcode; 885 int count; 886 unsigned one_reg_wr; 887}; 888 889typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p, 890 struct radeon_cs_packet *pkt, 891 unsigned idx, unsigned reg); 892typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, 893 struct radeon_cs_packet *pkt); 894 895 896/* 897 * AGP 898 */ 899int radeon_agp_init(struct radeon_device *rdev); 900void radeon_agp_resume(struct radeon_device *rdev); 901void radeon_agp_suspend(struct radeon_device *rdev); 902void radeon_agp_fini(struct radeon_device *rdev); 903 904 905/* 906 * Writeback 907 */ 908struct radeon_wb { 909 struct radeon_bo *wb_obj; 910 volatile uint32_t *wb; 911 uint64_t gpu_addr; 912 bool enabled; 913 bool use_event; 914}; 915 916#define RADEON_WB_SCRATCH_OFFSET 0 917#define RADEON_WB_RING0_NEXT_RPTR 256 918#define RADEON_WB_CP_RPTR_OFFSET 1024 919#define RADEON_WB_CP1_RPTR_OFFSET 1280 920#define RADEON_WB_CP2_RPTR_OFFSET 1536 921#define R600_WB_DMA_RPTR_OFFSET 1792 922#define R600_WB_IH_WPTR_OFFSET 2048 923#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 924#define R600_WB_EVENT_OFFSET 3072 925 926/** 927 * struct radeon_pm - power management datas 928 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) 929 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) 930 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) 931 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) 932 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) 933 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) 934 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) 935 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) 936 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) 937 * @sclk: GPU clock Mhz (core bandwidth depends of this clock) 938 * @needed_bandwidth: current bandwidth needs 939 * 940 * It keeps track of various data needed to take 
powermanagement decision. 941 * Bandwidth need is used to determine minimun clock of the GPU and memory. 942 * Equation between gpu/memory clock and available bandwidth is hw dependent 943 * (type of memory, bus size, efficiency, ...) 944 */ 945 946enum radeon_pm_method { 947 PM_METHOD_PROFILE, 948 PM_METHOD_DYNPM, 949}; 950 951enum radeon_dynpm_state { 952 DYNPM_STATE_DISABLED, 953 DYNPM_STATE_MINIMUM, 954 DYNPM_STATE_PAUSED, 955 DYNPM_STATE_ACTIVE, 956 DYNPM_STATE_SUSPENDED, 957}; 958enum radeon_dynpm_action { 959 DYNPM_ACTION_NONE, 960 DYNPM_ACTION_MINIMUM, 961 DYNPM_ACTION_DOWNCLOCK, 962 DYNPM_ACTION_UPCLOCK, 963 DYNPM_ACTION_DEFAULT 964}; 965 966enum radeon_voltage_type { 967 VOLTAGE_NONE = 0, 968 VOLTAGE_GPIO, 969 VOLTAGE_VDDC, 970 VOLTAGE_SW 971}; 972 973enum radeon_pm_state_type { 974 POWER_STATE_TYPE_DEFAULT, 975 POWER_STATE_TYPE_POWERSAVE, 976 POWER_STATE_TYPE_BATTERY, 977 POWER_STATE_TYPE_BALANCED, 978 POWER_STATE_TYPE_PERFORMANCE, 979}; 980 981enum radeon_pm_profile_type { 982 PM_PROFILE_DEFAULT, 983 PM_PROFILE_AUTO, 984 PM_PROFILE_LOW, 985 PM_PROFILE_MID, 986 PM_PROFILE_HIGH, 987}; 988 989#define PM_PROFILE_DEFAULT_IDX 0 990#define PM_PROFILE_LOW_SH_IDX 1 991#define PM_PROFILE_MID_SH_IDX 2 992#define PM_PROFILE_HIGH_SH_IDX 3 993#define PM_PROFILE_LOW_MH_IDX 4 994#define PM_PROFILE_MID_MH_IDX 5 995#define PM_PROFILE_HIGH_MH_IDX 6 996#define PM_PROFILE_MAX 7 997 998struct radeon_pm_profile { 999 int dpms_off_ps_idx; 1000 int dpms_on_ps_idx; 1001 int dpms_off_cm_idx; 1002 int dpms_on_cm_idx; 1003}; 1004 1005enum radeon_int_thermal_type { 1006 THERMAL_TYPE_NONE, 1007 THERMAL_TYPE_RV6XX, 1008 THERMAL_TYPE_RV770, 1009 THERMAL_TYPE_EVERGREEN, 1010 THERMAL_TYPE_SUMO, 1011 THERMAL_TYPE_NI, 1012 THERMAL_TYPE_SI, 1013}; 1014 1015struct radeon_voltage { 1016 enum radeon_voltage_type type; 1017 /* gpio voltage */ 1018 struct radeon_gpio_rec gpio; 1019 u32 delay; /* delay in usec from voltage drop to sclk change */ 1020 bool active_high; /* voltage drop is active 
when bit is high */ 1021 /* VDDC voltage */ 1022 u8 vddc_id; /* index into vddc voltage table */ 1023 u8 vddci_id; /* index into vddci voltage table */ 1024 bool vddci_enabled; 1025 /* r6xx+ sw */ 1026 u16 voltage; 1027 /* evergreen+ vddci */ 1028 u16 vddci; 1029}; 1030 1031/* clock mode flags */ 1032#define RADEON_PM_MODE_NO_DISPLAY (1 << 0) 1033 1034struct radeon_pm_clock_info { 1035 /* memory clock */ 1036 u32 mclk; 1037 /* engine clock */ 1038 u32 sclk; 1039 /* voltage info */ 1040 struct radeon_voltage voltage; 1041 /* standardized clock flags */ 1042 u32 flags; 1043}; 1044 1045/* state flags */ 1046#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) 1047 1048struct radeon_power_state { 1049 enum radeon_pm_state_type type; 1050 struct radeon_pm_clock_info *clock_info; 1051 /* number of valid clock modes in this power state */ 1052 int num_clock_modes; 1053 struct radeon_pm_clock_info *default_clock_mode; 1054 /* standardized state flags */ 1055 u32 flags; 1056 u32 misc; /* vbios specific flags */ 1057 u32 misc2; /* vbios specific flags */ 1058 int pcie_lanes; /* pcie lanes */ 1059}; 1060 1061/* 1062 * Some modes are overclocked by very low value, accept them 1063 */ 1064#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */ 1065 1066struct radeon_pm { 1067 struct sx mutex; 1068 /* write locked while reprogramming mclk */ 1069 struct sx mclk_lock; 1070 u32 active_crtcs; 1071 int active_crtc_count; 1072 int req_vblank; 1073 bool vblank_sync; 1074 fixed20_12 max_bandwidth; 1075 fixed20_12 igp_sideport_mclk; 1076 fixed20_12 igp_system_mclk; 1077 fixed20_12 igp_ht_link_clk; 1078 fixed20_12 igp_ht_link_width; 1079 fixed20_12 k8_bandwidth; 1080 fixed20_12 sideport_bandwidth; 1081 fixed20_12 ht_bandwidth; 1082 fixed20_12 core_bandwidth; 1083 fixed20_12 sclk; 1084 fixed20_12 mclk; 1085 fixed20_12 needed_bandwidth; 1086 struct radeon_power_state *power_state; 1087 /* number of valid power states */ 1088 int num_power_states; 1089 int current_power_state_index; 1090 int 
current_clock_mode_index; 1091 int requested_power_state_index; 1092 int requested_clock_mode_index; 1093 int default_power_state_index; 1094 u32 current_sclk; 1095 u32 current_mclk; 1096 u16 current_vddc; 1097 u16 current_vddci; 1098 u32 default_sclk; 1099 u32 default_mclk; 1100 u16 default_vddc; 1101 u16 default_vddci; 1102 struct radeon_i2c_chan *i2c_bus; 1103 /* selected pm method */ 1104 enum radeon_pm_method pm_method; 1105 /* dynpm power management */ 1106#ifdef DUMBBELL_WIP 1107 struct delayed_work dynpm_idle_work; 1108#endif /* DUMBBELL_WIP */ 1109 enum radeon_dynpm_state dynpm_state; 1110 enum radeon_dynpm_action dynpm_planned_action; 1111 unsigned long dynpm_action_timeout; 1112 bool dynpm_can_upclock; 1113 bool dynpm_can_downclock; 1114 /* profile-based power management */ 1115 enum radeon_pm_profile_type profile; 1116 int profile_index; 1117 struct radeon_pm_profile profiles[PM_PROFILE_MAX]; 1118 /* internal thermal controller on rv6xx+ */ 1119 enum radeon_int_thermal_type int_thermal_type; 1120#ifdef DUMBBELL_WIP 1121 struct device *int_hwmon_dev; 1122#endif /* DUMBBELL_WIP */ 1123}; 1124 1125int radeon_pm_get_type_index(struct radeon_device *rdev, 1126 enum radeon_pm_state_type ps_type, 1127 int instance); 1128 1129struct r600_audio { 1130 int channels; 1131 int rate; 1132 int bits_per_sample; 1133 u8 status_bits; 1134 u8 category_code; 1135}; 1136 1137/* 1138 * Benchmarking 1139 */ 1140void radeon_benchmark(struct radeon_device *rdev, int test_number); 1141 1142 1143/* 1144 * Testing 1145 */ 1146void radeon_test_moves(struct radeon_device *rdev); 1147void radeon_test_ring_sync(struct radeon_device *rdev, 1148 struct radeon_ring *cpA, 1149 struct radeon_ring *cpB); 1150void radeon_test_syncing(struct radeon_device *rdev); 1151 1152 1153/* 1154 * Debugfs 1155 */ 1156struct radeon_debugfs { 1157 struct drm_info_list *files; 1158 unsigned num_files; 1159}; 1160 1161int radeon_debugfs_add_files(struct radeon_device *rdev, 1162 struct drm_info_list 
				       *files,
				    unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);


/*
 * ASIC specific functions.
 *
 * Per-family dispatch table: each hardware generation fills in this set of
 * callbacks once, and the rest of the driver drives the chip through
 * rdev->asic (see the "ASICs macro" section further below and
 * radeon_asic_init()).
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform special
	 * operation on specific ioctl. For instance on wait idle some hw
	 * might want to perform and HDP flush through MMIO as it seems that
	 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
	 * through ring.
	 */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
	/* check if 3D engine is idle */
	bool (*gui_idle)(struct radeon_device *rdev);
	/* wait for mc_idle */
	int (*mc_wait_for_idle)(struct radeon_device *rdev);
	/* gart */
	struct {
		void (*tlb_flush)(struct radeon_device *rdev);
		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	} gart;
	/* GPU virtual memory (per-VM page-table handling) */
	struct {
		int (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);

		/* NOTE(review): presumably the ring index used for VM
		 * page-table updates -- confirm against radeon_asic.c */
		u32 pt_ring_index;
		void (*set_page)(struct radeon_device *rdev, uint64_t pe,
				 uint64_t addr, unsigned count,
				 uint32_t incr, uint32_t flags);
	} vm;
	/* ring specific callbacks, one entry per ring (RADEON_NUM_RINGS) */
	struct {
		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
				       struct radeon_semaphore *semaphore, bool emit_wait);
		int (*cs_parse)(struct radeon_cs_parser *p);
		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
	} ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		irqreturn_t (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
		/* set backlight level */
		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
		/* get backlight level */
		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence **fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
		void (*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	} pm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};

/*
 * Asic structures
 *
 * Per-family configuration snapshots; exactly one of these is active in
 * union radeon_asic_config below.
 */
struct r100_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
	u32 hdp_cntl;
};

struct r300_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
	u32 resync_scratch;
	u32 hdp_cntl;
};

struct r600_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned
max_gs_threads; 1320 unsigned sx_max_export_size; 1321 unsigned sx_max_export_pos_size; 1322 unsigned sx_max_export_smx_size; 1323 unsigned sq_num_cf_insts; 1324 unsigned tiling_nbanks; 1325 unsigned tiling_npipes; 1326 unsigned tiling_group_size; 1327 unsigned tile_config; 1328 unsigned backend_map; 1329}; 1330 1331struct rv770_asic { 1332 unsigned max_pipes; 1333 unsigned max_tile_pipes; 1334 unsigned max_simds; 1335 unsigned max_backends; 1336 unsigned max_gprs; 1337 unsigned max_threads; 1338 unsigned max_stack_entries; 1339 unsigned max_hw_contexts; 1340 unsigned max_gs_threads; 1341 unsigned sx_max_export_size; 1342 unsigned sx_max_export_pos_size; 1343 unsigned sx_max_export_smx_size; 1344 unsigned sq_num_cf_insts; 1345 unsigned sx_num_of_sets; 1346 unsigned sc_prim_fifo_size; 1347 unsigned sc_hiz_tile_fifo_size; 1348 unsigned sc_earlyz_tile_fifo_fize; 1349 unsigned tiling_nbanks; 1350 unsigned tiling_npipes; 1351 unsigned tiling_group_size; 1352 unsigned tile_config; 1353 unsigned backend_map; 1354}; 1355 1356struct evergreen_asic { 1357 unsigned num_ses; 1358 unsigned max_pipes; 1359 unsigned max_tile_pipes; 1360 unsigned max_simds; 1361 unsigned max_backends; 1362 unsigned max_gprs; 1363 unsigned max_threads; 1364 unsigned max_stack_entries; 1365 unsigned max_hw_contexts; 1366 unsigned max_gs_threads; 1367 unsigned sx_max_export_size; 1368 unsigned sx_max_export_pos_size; 1369 unsigned sx_max_export_smx_size; 1370 unsigned sq_num_cf_insts; 1371 unsigned sx_num_of_sets; 1372 unsigned sc_prim_fifo_size; 1373 unsigned sc_hiz_tile_fifo_size; 1374 unsigned sc_earlyz_tile_fifo_size; 1375 unsigned tiling_nbanks; 1376 unsigned tiling_npipes; 1377 unsigned tiling_group_size; 1378 unsigned tile_config; 1379 unsigned backend_map; 1380}; 1381 1382struct cayman_asic { 1383 unsigned max_shader_engines; 1384 unsigned max_pipes_per_simd; 1385 unsigned max_tile_pipes; 1386 unsigned max_simds_per_se; 1387 unsigned max_backends_per_se; 1388 unsigned 
max_texture_channel_caches; 1389 unsigned max_gprs; 1390 unsigned max_threads; 1391 unsigned max_gs_threads; 1392 unsigned max_stack_entries; 1393 unsigned sx_num_of_sets; 1394 unsigned sx_max_export_size; 1395 unsigned sx_max_export_pos_size; 1396 unsigned sx_max_export_smx_size; 1397 unsigned max_hw_contexts; 1398 unsigned sq_num_cf_insts; 1399 unsigned sc_prim_fifo_size; 1400 unsigned sc_hiz_tile_fifo_size; 1401 unsigned sc_earlyz_tile_fifo_size; 1402 1403 unsigned num_shader_engines; 1404 unsigned num_shader_pipes_per_simd; 1405 unsigned num_tile_pipes; 1406 unsigned num_simds_per_se; 1407 unsigned num_backends_per_se; 1408 unsigned backend_disable_mask_per_asic; 1409 unsigned backend_map; 1410 unsigned num_texture_channel_caches; 1411 unsigned mem_max_burst_length_bytes; 1412 unsigned mem_row_size_in_kb; 1413 unsigned shader_engine_tile_size; 1414 unsigned num_gpus; 1415 unsigned multi_gpu_tile_size; 1416 1417 unsigned tile_config; 1418}; 1419 1420struct si_asic { 1421 unsigned max_shader_engines; 1422 unsigned max_tile_pipes; 1423 unsigned max_cu_per_sh; 1424 unsigned max_sh_per_se; 1425 unsigned max_backends_per_se; 1426 unsigned max_texture_channel_caches; 1427 unsigned max_gprs; 1428 unsigned max_gs_threads; 1429 unsigned max_hw_contexts; 1430 unsigned sc_prim_fifo_size_frontend; 1431 unsigned sc_prim_fifo_size_backend; 1432 unsigned sc_hiz_tile_fifo_size; 1433 unsigned sc_earlyz_tile_fifo_size; 1434 1435 unsigned num_tile_pipes; 1436 unsigned num_backends_per_se; 1437 unsigned backend_disable_mask_per_asic; 1438 unsigned backend_map; 1439 unsigned num_texture_channel_caches; 1440 unsigned mem_max_burst_length_bytes; 1441 unsigned mem_row_size_in_kb; 1442 unsigned shader_engine_tile_size; 1443 unsigned num_gpus; 1444 unsigned multi_gpu_tile_size; 1445 1446 unsigned tile_config; 1447}; 1448 1449union radeon_asic_config { 1450 struct r300_asic r300; 1451 struct r100_asic r100; 1452 struct r600_asic r600; 1453 struct rv770_asic rv770; 1454 struct 
evergreen_asic evergreen; 1455 struct cayman_asic cayman; 1456 struct si_asic si; 1457}; 1458 1459/* 1460 * asic initizalization from radeon_asic.c 1461 */ 1462int radeon_asic_init(struct radeon_device *rdev); 1463 1464 1465/* 1466 * IOCTL. 1467 */ 1468int radeon_gem_info_ioctl(struct drm_device *dev, void *data, 1469 struct drm_file *filp); 1470int radeon_gem_create_ioctl(struct drm_device *dev, void *data, 1471 struct drm_file *filp); 1472int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, 1473 struct drm_file *file_priv); 1474int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, 1475 struct drm_file *file_priv); 1476int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1477 struct drm_file *file_priv); 1478int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, 1479 struct drm_file *file_priv); 1480int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1481 struct drm_file *filp); 1482int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 1483 struct drm_file *filp); 1484int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 1485 struct drm_file *filp); 1486int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 1487 struct drm_file *filp); 1488int radeon_gem_va_ioctl(struct drm_device *dev, void *data, 1489 struct drm_file *filp); 1490int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1491int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 1492 struct drm_file *filp); 1493int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 1494 struct drm_file *filp); 1495 1496/* VRAM scratch page for HDP bug, default vram page */ 1497struct r600_vram_scratch { 1498 struct radeon_bo *robj; 1499 volatile uint32_t *ptr; 1500 u64 gpu_addr; 1501}; 1502 1503/* 1504 * ACPI 1505 */ 1506struct radeon_atif_notification_cfg { 1507 bool enabled; 1508 int command_code; 1509}; 1510 1511struct radeon_atif_notifications { 1512 bool display_switch; 1513 bool 
expansion_mode_change; 1514 bool thermal_state; 1515 bool forced_power_state; 1516 bool system_power_state; 1517 bool display_conf_change; 1518 bool px_gfx_switch; 1519 bool brightness_change; 1520 bool dgpu_display_event; 1521}; 1522 1523struct radeon_atif_functions { 1524 bool system_params; 1525 bool sbios_requests; 1526 bool select_active_disp; 1527 bool lid_state; 1528 bool get_tv_standard; 1529 bool set_tv_standard; 1530 bool get_panel_expansion_mode; 1531 bool set_panel_expansion_mode; 1532 bool temperature_change; 1533 bool graphics_device_types; 1534}; 1535 1536struct radeon_atif { 1537 struct radeon_atif_notifications notifications; 1538 struct radeon_atif_functions functions; 1539 struct radeon_atif_notification_cfg notification_cfg; 1540 struct radeon_encoder *encoder_for_bl; 1541}; 1542 1543struct radeon_atcs_functions { 1544 bool get_ext_state; 1545 bool pcie_perf_req; 1546 bool pcie_dev_rdy; 1547 bool pcie_bus_width; 1548}; 1549 1550struct radeon_atcs { 1551 struct radeon_atcs_functions functions; 1552}; 1553 1554/* 1555 * Core structure, functions and helpers. 
1556 */ 1557typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t); 1558typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t); 1559 1560struct radeon_device { 1561 device_t dev; 1562 struct drm_device *ddev; 1563 struct sx exclusive_lock; 1564 /* ASIC */ 1565 union radeon_asic_config config; 1566 enum radeon_family family; 1567 unsigned long flags; 1568 int usec_timeout; 1569 enum radeon_pll_errata pll_errata; 1570 int num_gb_pipes; 1571 int num_z_pipes; 1572 int disp_priority; 1573 /* BIOS */ 1574 uint8_t *bios; 1575 bool is_atom_bios; 1576 uint16_t bios_header_start; 1577 struct radeon_bo *stollen_vga_memory; 1578 /* Register mmio */ 1579 resource_size_t rmmio_base; 1580 resource_size_t rmmio_size; 1581 /* protects concurrent MM_INDEX/DATA based register access */ 1582 struct mtx mmio_idx_lock; 1583 int rmmio_rid; 1584 struct resource *rmmio; 1585 radeon_rreg_t mc_rreg; 1586 radeon_wreg_t mc_wreg; 1587 radeon_rreg_t pll_rreg; 1588 radeon_wreg_t pll_wreg; 1589 uint32_t pcie_reg_mask; 1590 radeon_rreg_t pciep_rreg; 1591 radeon_wreg_t pciep_wreg; 1592 /* io port */ 1593 int rio_rid; 1594 struct resource *rio_mem; 1595 resource_size_t rio_mem_size; 1596 struct radeon_clock clock; 1597 struct radeon_mc mc; 1598 struct radeon_gart gart; 1599 struct radeon_mode_info mode_info; 1600 struct radeon_scratch scratch; 1601 struct radeon_mman mman; 1602 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 1603 struct cv fence_queue; 1604 struct mtx fence_queue_mtx; 1605 struct sx ring_lock; 1606 struct radeon_ring ring[RADEON_NUM_RINGS]; 1607 bool ib_pool_ready; 1608 struct radeon_sa_manager ring_tmp_bo; 1609 struct radeon_irq irq; 1610 struct radeon_asic *asic; 1611 struct radeon_gem gem; 1612 struct radeon_pm pm; 1613 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1614 struct radeon_wb wb; 1615 struct radeon_dummy_page dummy_page; 1616 bool shutdown; 1617 bool suspend; 1618 bool need_dma32; 1619 bool accel_working; 1620 bool 
fictitious_range_registered; 1621 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 1622 const struct firmware *me_fw; /* all family ME firmware */ 1623 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1624 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1625 const struct firmware *mc_fw; /* NI MC firmware */ 1626 const struct firmware *ce_fw; /* SI CE firmware */ 1627 struct r600_blit r600_blit; 1628 struct r600_vram_scratch vram_scratch; 1629 int msi_enabled; /* msi enabled */ 1630 struct r600_ih ih; /* r6/700 interrupt ring */ 1631 struct si_rlc rlc; 1632 struct taskqueue *tq; 1633 struct task hotplug_work; 1634 struct task audio_work; 1635 int num_crtc; /* number of crtcs */ 1636 struct sx dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 1637 bool audio_enabled; 1638 struct r600_audio audio_status; /* audio stuff */ 1639 struct { 1640 ACPI_HANDLE handle; 1641 ACPI_NOTIFY_HANDLER notifier_call; 1642 } acpi; 1643 /* only one userspace can use Hyperz features or CMASK at a time */ 1644 struct drm_file *hyperz_filp; 1645 struct drm_file *cmask_filp; 1646 /* i2c buses */ 1647 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; 1648 /* debugfs */ 1649 struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS]; 1650 unsigned debugfs_count; 1651 /* virtual memory */ 1652 struct radeon_vm_manager vm_manager; 1653 struct sx gpu_clock_mutex; 1654 /* ACPI interface */ 1655 struct radeon_atif atif; 1656 struct radeon_atcs atcs; 1657}; 1658 1659int radeon_device_init(struct radeon_device *rdev, 1660 struct drm_device *ddev, 1661 uint32_t flags); 1662void radeon_device_fini(struct radeon_device *rdev); 1663int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 1664 1665uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, 1666 bool always_indirect); 1667void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, 1668 bool always_indirect); 1669u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 1670void 
r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1671 1672/* 1673 * Cast helper 1674 */ 1675#define to_radeon_fence(p) ((struct radeon_fence *)(p)) 1676 1677/* 1678 * Registers read & write functions. 1679 */ 1680#define RREG8(reg) bus_read_1((rdev->rmmio), (reg)) 1681#define WREG8(reg, v) bus_write_1((rdev->rmmio), (reg), v) 1682#define RREG16(reg) bus_read_2((rdev->rmmio), (reg)) 1683#define WREG16(reg, v) bus_write_2((rdev->rmmio), (reg), v) 1684#define RREG32(reg) r100_mm_rreg(rdev, (reg), false) 1685#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true) 1686#define DREG32(reg) DRM_INFO("REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1687#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false) 1688#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true) 1689#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1690#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1691#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) 1692#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) 1693#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) 1694#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1695#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1696#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1697#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg)) 1698#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 1699#define WREG32_P(reg, val, mask) \ 1700 do { \ 1701 uint32_t tmp_ = RREG32(reg); \ 1702 tmp_ &= (mask); \ 1703 tmp_ |= ((val) & ~(mask)); \ 1704 WREG32(reg, tmp_); \ 1705 } while (0) 1706#define WREG32_PLL_P(reg, val, mask) \ 1707 do { \ 1708 uint32_t tmp_ = RREG32_PLL(reg); \ 1709 tmp_ &= (mask); \ 1710 tmp_ |= ((val) & ~(mask)); \ 1711 WREG32_PLL(reg, tmp_); \ 1712 } while (0) 1713#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false)) 1714#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 1715#define 
WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 1716 1717/* 1718 * Indirect registers accessor 1719 */ 1720static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 1721{ 1722 uint32_t r; 1723 1724 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 1725 r = RREG32(RADEON_PCIE_DATA); 1726 return r; 1727} 1728 1729static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1730{ 1731 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 1732 WREG32(RADEON_PCIE_DATA, (v)); 1733} 1734 1735void r100_pll_errata_after_index(struct radeon_device *rdev); 1736 1737 1738/* 1739 * ASICs helpers. 1740 */ 1741#define ASIC_IS_RN50(rdev) ((rdev->ddev->pci_device == 0x515e) || \ 1742 (rdev->ddev->pci_device == 0x5969)) 1743#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ 1744 (rdev->family == CHIP_RV200) || \ 1745 (rdev->family == CHIP_RS100) || \ 1746 (rdev->family == CHIP_RS200) || \ 1747 (rdev->family == CHIP_RV250) || \ 1748 (rdev->family == CHIP_RV280) || \ 1749 (rdev->family == CHIP_RS300)) 1750#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \ 1751 (rdev->family == CHIP_RV350) || \ 1752 (rdev->family == CHIP_R350) || \ 1753 (rdev->family == CHIP_RV380) || \ 1754 (rdev->family == CHIP_R420) || \ 1755 (rdev->family == CHIP_R423) || \ 1756 (rdev->family == CHIP_RV410) || \ 1757 (rdev->family == CHIP_RS400) || \ 1758 (rdev->family == CHIP_RS480)) 1759#define ASIC_IS_X2(rdev) ((rdev->ddev->pci_device == 0x9441) || \ 1760 (rdev->ddev->pci_device == 0x9443) || \ 1761 (rdev->ddev->pci_device == 0x944B) || \ 1762 (rdev->ddev->pci_device == 0x9506) || \ 1763 (rdev->ddev->pci_device == 0x9509) || \ 1764 (rdev->ddev->pci_device == 0x950F) || \ 1765 (rdev->ddev->pci_device == 0x689C) || \ 1766 (rdev->ddev->pci_device == 0x689D)) 1767#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1768#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ 1769 (rdev->family == CHIP_RS690) || \ 1770 
(rdev->family == CHIP_RS740) || \ 1771 (rdev->family >= CHIP_R600)) 1772#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1773#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1774#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) 1775#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ 1776 (rdev->flags & RADEON_IS_IGP)) 1777#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS)) 1778#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA)) 1779#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ 1780 (rdev->flags & RADEON_IS_IGP)) 1781 1782/* 1783 * BIOS helpers. 1784 */ 1785#define RBIOS8(i) (rdev->bios[i]) 1786#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) 1787#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) 1788 1789int radeon_combios_init(struct radeon_device *rdev); 1790void radeon_combios_fini(struct radeon_device *rdev); 1791int radeon_atombios_init(struct radeon_device *rdev); 1792void radeon_atombios_fini(struct radeon_device *rdev); 1793 1794 1795/* 1796 * RING helpers. 1797 */ 1798#if !defined(DRM_DEBUG_CODE) || DRM_DEBUG_CODE == 0 1799static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) 1800{ 1801 ring->ring[ring->wptr++] = v; 1802 ring->wptr &= ring->ptr_mask; 1803 ring->count_dw--; 1804 ring->ring_free_dw--; 1805} 1806#else 1807/* With debugging this is just too big to inline */ 1808void radeon_ring_write(struct radeon_ring *ring, uint32_t v); 1809#endif 1810 1811/* 1812 * ASICs macro. 
1813 */ 1814#define radeon_init(rdev) (rdev)->asic->init((rdev)) 1815#define radeon_fini(rdev) (rdev)->asic->fini((rdev)) 1816#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) 1817#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 1818#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) 1819#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 1820#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 1821#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 1822#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) 1823#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 1824#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 1825#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags))) 1826#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) 1827#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) 1828#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) 1829#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) 1830#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 1831#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) 1832#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) 1833#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 1834#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 1835#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 1836#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l)) 1837#define radeon_get_backlight_level(rdev, e) 
(rdev)->asic->display.get_backlight_level((e)) 1838#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) 1839#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 1840#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 1841#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) 1842#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) 1843#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index 1844#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index 1845#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index 1846#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev)) 1847#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e)) 1848#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev)) 1849#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e)) 1850#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev)) 1851#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) 1852#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) 1853#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) 1854#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) 1855#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) 1856#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev)) 1857#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev)) 1858#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h)) 1859#define radeon_hpd_set_polarity(rdev, h) 
(rdev)->asic->hpd.set_polarity((rdev), (h)) 1860#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) 1861#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev)) 1862#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev)) 1863#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) 1864#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) 1865#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) 1866#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) 1867#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) 1868#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) 1869#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) 1870#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) 1871 1872/* Common functions */ 1873/* AGP */ 1874extern int radeon_gpu_reset(struct radeon_device *rdev); 1875extern void radeon_agp_disable(struct radeon_device *rdev); 1876extern int radeon_modeset_init(struct radeon_device *rdev); 1877extern void radeon_modeset_fini(struct radeon_device *rdev); 1878extern bool radeon_card_posted(struct radeon_device *rdev); 1879extern void radeon_update_bandwidth_info(struct radeon_device *rdev); 1880extern void radeon_update_display_priority(struct radeon_device *rdev); 1881extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1882extern void radeon_scratch_init(struct radeon_device *rdev); 1883extern void radeon_wb_fini(struct radeon_device *rdev); 1884extern int radeon_wb_init(struct radeon_device *rdev); 1885extern void radeon_wb_disable(struct radeon_device *rdev); 1886extern void radeon_surface_init(struct radeon_device *rdev); 1887extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 1888extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 
domain); 1889extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 1890extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); 1891extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1892extern int radeon_resume_kms(struct drm_device *dev); 1893extern int radeon_suspend_kms(struct drm_device *dev); 1894extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); 1895 1896/* 1897 * vm 1898 */ 1899int radeon_vm_manager_init(struct radeon_device *rdev); 1900void radeon_vm_manager_fini(struct radeon_device *rdev); 1901void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); 1902void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); 1903int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); 1904void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm); 1905struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, 1906 struct radeon_vm *vm, int ring); 1907void radeon_vm_fence(struct radeon_device *rdev, 1908 struct radeon_vm *vm, 1909 struct radeon_fence *fence); 1910uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 1911int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1912 struct radeon_vm *vm, 1913 struct radeon_bo *bo, 1914 struct ttm_mem_reg *mem); 1915void radeon_vm_bo_invalidate(struct radeon_device *rdev, 1916 struct radeon_bo *bo); 1917struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, 1918 struct radeon_bo *bo); 1919struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, 1920 struct radeon_vm *vm, 1921 struct radeon_bo *bo); 1922int radeon_vm_bo_set_addr(struct radeon_device *rdev, 1923 struct radeon_bo_va *bo_va, 1924 uint64_t offset, 1925 uint32_t flags); 1926int radeon_vm_bo_rmv(struct radeon_device *rdev, 1927 struct radeon_bo_va *bo_va); 1928 1929/* audio */ 1930void r600_audio_update_hdmi(void *arg, int pending); 1931 1932/* 1933 
* R600 vram scratch functions 1934 */ 1935int r600_vram_scratch_init(struct radeon_device *rdev); 1936void r600_vram_scratch_fini(struct radeon_device *rdev); 1937 1938/* 1939 * r600 cs checking helper 1940 */ 1941unsigned r600_mip_minify(unsigned size, unsigned level); 1942bool r600_fmt_is_valid_color(u32 format); 1943bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family); 1944int r600_fmt_get_blocksize(u32 format); 1945int r600_fmt_get_nblocksx(u32 format, u32 w); 1946int r600_fmt_get_nblocksy(u32 format, u32 h); 1947 1948/* 1949 * r600 functions used by radeon_encoder.c 1950 */ 1951struct radeon_hdmi_acr { 1952 u32 clock; 1953 1954 int n_32khz; 1955 int cts_32khz; 1956 1957 int n_44_1khz; 1958 int cts_44_1khz; 1959 1960 int n_48khz; 1961 int cts_48khz; 1962 1963}; 1964 1965extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); 1966 1967extern void r600_hdmi_enable(struct drm_encoder *encoder); 1968extern void r600_hdmi_disable(struct drm_encoder *encoder); 1969extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1970extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1971 u32 tiling_pipe_num, 1972 u32 max_rb_num, 1973 u32 total_max_rb_num, 1974 u32 enabled_rb_mask); 1975 1976/* 1977 * evergreen functions used by radeon_encoder.c 1978 */ 1979 1980extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1981 1982extern int ni_init_microcode(struct radeon_device *rdev); 1983extern int ni_mc_load_microcode(struct radeon_device *rdev); 1984extern void ni_fini_microcode(struct radeon_device *rdev); 1985 1986/* radeon_acpi.c */ 1987extern int radeon_acpi_init(struct radeon_device *rdev); 1988extern void radeon_acpi_fini(struct radeon_device *rdev); 1989 1990/* Prototypes added by @dumbbell. 
*/ 1991 1992/* atombios_encoders.c */ 1993void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, 1994 struct drm_connector *drm_connector); 1995void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, 1996 uint32_t supported_device, u16 caps); 1997 1998/* radeon_atombios.c */ 1999bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, 2000 struct drm_display_mode *mode); 2001 2002/* radeon_combios.c */ 2003void radeon_combios_connected_scratch_regs(struct drm_connector *connector, 2004 struct drm_encoder *encoder, bool connected); 2005 2006/* radeon_connectors.c */ 2007void radeon_atombios_connected_scratch_regs(struct drm_connector *connector, 2008 struct drm_encoder *encoder, bool connected); 2009void radeon_add_legacy_connector(struct drm_device *dev, 2010 uint32_t connector_id, 2011 uint32_t supported_device, 2012 int connector_type, 2013 struct radeon_i2c_bus_rec *i2c_bus, 2014 uint16_t connector_object_id, 2015 struct radeon_hpd *hpd); 2016void radeon_add_atom_connector(struct drm_device *dev, 2017 uint32_t connector_id, 2018 uint32_t supported_device, 2019 int connector_type, 2020 struct radeon_i2c_bus_rec *i2c_bus, 2021 uint32_t igp_lane_info, 2022 uint16_t connector_object_id, 2023 struct radeon_hpd *hpd, 2024 struct radeon_router *router); 2025 2026/* radeon_encoders.c */ 2027uint32_t radeon_get_encoder_enum(struct drm_device *dev, 2028 uint32_t supported_device, uint8_t dac); 2029void radeon_link_encoder_connector(struct drm_device *dev); 2030 2031/* radeon_legacy_encoders.c */ 2032void radeon_add_legacy_encoder(struct drm_device *dev, 2033 uint32_t encoder_enum, uint32_t supported_device); 2034void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, 2035 struct drm_connector *drm_connector); 2036 2037/* radeon_pm.c */ 2038void radeon_pm_acpi_event_handler(struct radeon_device *rdev); 2039 2040/* radeon_ttm.c */ 2041int radeon_ttm_init(struct radeon_device *rdev); 2042void 
radeon_ttm_fini(struct radeon_device *rdev); 2043
| 69 70#include <sys/param.h> 71#include <sys/systm.h> 72#include <sys/linker.h> 73#include <sys/firmware.h> 74 75#include <contrib/dev/acpica/include/acpi.h> 76#include <dev/acpica/acpivar.h> 77 78#include <dev/drm2/ttm/ttm_bo_api.h> 79#include <dev/drm2/ttm/ttm_bo_driver.h> 80#include <dev/drm2/ttm/ttm_placement.h> 81#include <dev/drm2/ttm/ttm_module.h> 82#include <dev/drm2/ttm/ttm_execbuf_util.h> 83 84#include "radeon_family.h" 85#include "radeon_mode.h" 86#include "radeon_reg.h" 87 88/* 89 * Modules parameters. 90 */ 91extern int radeon_no_wb; 92extern int radeon_modeset; 93extern int radeon_dynclks; 94extern int radeon_r4xx_atom; 95extern int radeon_agpmode; 96extern int radeon_vram_limit; 97extern int radeon_gart_size; 98extern int radeon_benchmarking; 99extern int radeon_testing; 100extern int radeon_connector_table; 101extern int radeon_tv; 102extern int radeon_audio; 103extern int radeon_disp_priority; 104extern int radeon_hw_i2c; 105extern int radeon_pcie_gen2; 106extern int radeon_msi; 107extern int radeon_lockup_timeout; 108 109/* 110 * Copy from radeon_drv.h so we don't have to include both and have conflicting 111 * symbol; 112 */ 113#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 114#define RADEON_FENCE_JIFFIES_TIMEOUT (DRM_HZ / 2) 115/* RADEON_IB_POOL_SIZE must be a power of 2 */ 116#define RADEON_IB_POOL_SIZE 16 117#define RADEON_DEBUGFS_MAX_COMPONENTS 32 118#define RADEONFB_CONN_LIMIT 4 119#define RADEON_BIOS_NUM_SCRATCH 8 120 121/* max number of rings */ 122#define RADEON_NUM_RINGS 5 123 124/* fence seq are set to this number when signaled */ 125#define RADEON_FENCE_SIGNALED_SEQ 0LL 126 127/* internal ring indices */ 128/* r1xx+ has gfx CP ring */ 129#define RADEON_RING_TYPE_GFX_INDEX 0 130 131/* cayman has 2 compute CP rings */ 132#define CAYMAN_RING_TYPE_CP1_INDEX 1 133#define CAYMAN_RING_TYPE_CP2_INDEX 2 134 135/* R600+ has an async dma ring */ 136#define R600_RING_TYPE_DMA_INDEX 3 137/* cayman add a second async dma ring */ 138#define 
CAYMAN_RING_TYPE_DMA1_INDEX 4 139 140/* hardcode those limit for now */ 141#define RADEON_VA_IB_OFFSET (1 << 20) 142#define RADEON_VA_RESERVED_SIZE (8 << 20) 143#define RADEON_IB_VM_MAX_SIZE (64 << 10) 144 145/* reset flags */ 146#define RADEON_RESET_GFX (1 << 0) 147#define RADEON_RESET_COMPUTE (1 << 1) 148#define RADEON_RESET_DMA (1 << 2) 149 150/* 151 * Errata workarounds. 152 */ 153enum radeon_pll_errata { 154 CHIP_ERRATA_R300_CG = 0x00000001, 155 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002, 156 CHIP_ERRATA_PLL_DELAY = 0x00000004 157}; 158 159 160struct radeon_device; 161 162 163/* 164 * BIOS. 165 */ 166bool radeon_get_bios(struct radeon_device *rdev); 167 168/* 169 * Dummy page 170 */ 171struct radeon_dummy_page { 172 drm_dma_handle_t *dmah; 173 dma_addr_t addr; 174}; 175int radeon_dummy_page_init(struct radeon_device *rdev); 176void radeon_dummy_page_fini(struct radeon_device *rdev); 177 178 179/* 180 * Clocks 181 */ 182struct radeon_clock { 183 struct radeon_pll p1pll; 184 struct radeon_pll p2pll; 185 struct radeon_pll dcpll; 186 struct radeon_pll spll; 187 struct radeon_pll mpll; 188 /* 10 Khz units */ 189 uint32_t default_mclk; 190 uint32_t default_sclk; 191 uint32_t default_dispclk; 192 uint32_t dp_extclk; 193 uint32_t max_pixel_clock; 194}; 195 196/* 197 * Power management 198 */ 199int radeon_pm_init(struct radeon_device *rdev); 200void radeon_pm_fini(struct radeon_device *rdev); 201void radeon_pm_compute_clocks(struct radeon_device *rdev); 202void radeon_pm_suspend(struct radeon_device *rdev); 203void radeon_pm_resume(struct radeon_device *rdev); 204void radeon_combios_get_power_modes(struct radeon_device *rdev); 205void radeon_atombios_get_power_modes(struct radeon_device *rdev); 206void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 207void rs690_pm_info(struct radeon_device *rdev); 208extern int rv6xx_get_temp(struct radeon_device *rdev); 209extern int rv770_get_temp(struct radeon_device *rdev); 210extern int 
evergreen_get_temp(struct radeon_device *rdev); 211extern int sumo_get_temp(struct radeon_device *rdev); 212extern int si_get_temp(struct radeon_device *rdev); 213extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, 214 unsigned *bankh, unsigned *mtaspect, 215 unsigned *tile_split); 216 217/* 218 * Fences. 219 */ 220struct radeon_fence_driver { 221 uint32_t scratch_reg; 222 uint64_t gpu_addr; 223 volatile uint32_t *cpu_addr; 224 /* sync_seq is protected by ring emission lock */ 225 uint64_t sync_seq[RADEON_NUM_RINGS]; 226 atomic64_t last_seq; 227 unsigned long last_activity; 228 bool initialized; 229}; 230 231struct radeon_fence { 232 struct radeon_device *rdev; 233 unsigned int kref; 234 /* protected by radeon_fence.lock */ 235 uint64_t seq; 236 /* RB, DMA, etc. */ 237 unsigned ring; 238}; 239 240int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 241int radeon_fence_driver_init(struct radeon_device *rdev); 242void radeon_fence_driver_fini(struct radeon_device *rdev); 243void radeon_fence_driver_force_completion(struct radeon_device *rdev); 244int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); 245void radeon_fence_process(struct radeon_device *rdev, int ring); 246bool radeon_fence_signaled(struct radeon_fence *fence); 247int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 248int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); 249int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); 250int radeon_fence_wait_any(struct radeon_device *rdev, 251 struct radeon_fence **fences, 252 bool intr); 253struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); 254void radeon_fence_unref(struct radeon_fence **fence); 255unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring); 256bool radeon_fence_need_sync(struct radeon_fence *fence, int ring); 257void radeon_fence_note_sync(struct radeon_fence *fence, int 
	ring);

/*
 * Return whichever of the two fences will signal later, i.e. the one with
 * the higher sequence number.  A NULL argument means "no fence", so the
 * other argument wins.  Both fences must belong to the same ring.
 */
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
						      struct radeon_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belongs to different rings"));

	/* Higher seq signals later; on a tie, "b" is returned. */
	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

/*
 * True when fence "a" signals strictly before fence "b" on the same ring.
 * NULL ordering: a NULL "a" is never earlier; a NULL "b" makes any
 * non-NULL "a" earlier.
 */
static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
					   struct radeon_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	KASSERT(a->ring == b->ring, ("\"a\" and \"b\" belongs to different rings"));

	return a->seq < b->seq;
}

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	/* bo currently owning this surface register, NULL when free */
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 *
 * Per-device TTM bookkeeping: global references plus the buffer-object
 * device backing all radeon BOs.
 */
struct radeon_mman {
	struct ttm_bo_global_ref	bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	uint64_t			soffset;
	uint64_t			eoffset;
	uint32_t			flags;
	bool				valid;
	unsigned			ref_count;

	/* protected by vm mutex */
	struct list_head		vm_list;

	/* constant after initialization */
	struct radeon_vm		*vm;
	struct radeon_bo		*bo;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				placements[3];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	void				*kptr;
	u32				tiling_flags;
	u32				pitch;
	int				surface_reg;
	/* list of all virtual address to which this bo
	 * is associated to
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct radeon_device		*rdev;
	struct drm_gem_object		gem_base;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	int				vmapping_count;
};
/* Recover the radeon_bo that embeds a given drm_gem_object. */
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

/* Entry on a CS validation list: one bo plus its requested domains. */
struct radeon_bo_list {
	struct ttm_validate_buffer	tv;
	struct radeon_bo		*bo;
	uint64_t			gpu_offset;
	unsigned			rdomain;
	unsigned			wdomain;
	u32				tiling_flags;
};

/* sub-allocation manager, it has to be protected by another lock.
 * By conception this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
 * locking.
 *
 * Principle is simple, we keep a list of sub allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating new object we first check if there is room at
 * the end total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset+object_size >=
 * alloc_size, this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * Assumption is that there won't be holes (all objects on same
 * alignment).
 */
struct radeon_sa_manager {
	struct cv		wq;		/* waiters sleep here until space frees up */
	struct sx		wq_lock;	/* lock paired with the condvar above */
	struct radeon_bo	*bo;		/* backing buffer object the pool lives in */
	struct list_head	*hole;		/* current insertion point in olist */
	struct list_head	flist[RADEON_NUM_RINGS]; /* per-ring fenced (in-flight) allocations */
	struct list_head	olist;		/* all allocations in offset order */
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
};

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		olist;	/* link in manager->olist (offset order) */
	struct list_head		flist;	/* link in manager->flist[ring] while fenced */
	struct radeon_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct radeon_fence		*fence;	/* fence protecting this range, NULL when idle */
};

/*
 * GEM objects.
417 */ 418struct radeon_gem { 419 struct sx mutex; 420 struct list_head objects; 421}; 422 423int radeon_gem_init(struct radeon_device *rdev); 424void radeon_gem_fini(struct radeon_device *rdev); 425int radeon_gem_object_create(struct radeon_device *rdev, int size, 426 int alignment, int initial_domain, 427 bool discardable, bool kernel, 428 struct drm_gem_object **obj); 429 430int radeon_mode_dumb_create(struct drm_file *file_priv, 431 struct drm_device *dev, 432 struct drm_mode_create_dumb *args); 433int radeon_mode_dumb_mmap(struct drm_file *filp, 434 struct drm_device *dev, 435 uint32_t handle, uint64_t *offset_p); 436int radeon_mode_dumb_destroy(struct drm_file *file_priv, 437 struct drm_device *dev, 438 uint32_t handle); 439 440/* 441 * Semaphores. 442 */ 443/* everything here is constant */ 444struct radeon_semaphore { 445 struct radeon_sa_bo *sa_bo; 446 signed waiters; 447 uint64_t gpu_addr; 448}; 449 450int radeon_semaphore_create(struct radeon_device *rdev, 451 struct radeon_semaphore **semaphore); 452void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, 453 struct radeon_semaphore *semaphore); 454void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 455 struct radeon_semaphore *semaphore); 456int radeon_semaphore_sync_rings(struct radeon_device *rdev, 457 struct radeon_semaphore *semaphore, 458 int signaler, int waiter); 459void radeon_semaphore_free(struct radeon_device *rdev, 460 struct radeon_semaphore **semaphore, 461 struct radeon_fence *fence); 462 463/* 464 * GART structures, functions & helpers 465 */ 466struct radeon_mc; 467 468#define RADEON_GPU_PAGE_SIZE 4096 469#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 470#define RADEON_GPU_PAGE_SHIFT 12 471#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK) 472 473struct radeon_gart { 474 drm_dma_handle_t *dmah; 475 dma_addr_t table_addr; 476 struct radeon_bo *robj; 477 void *ptr; 478 unsigned num_gpu_pages; 479 unsigned 
num_cpu_pages; 480 unsigned table_size; 481 vm_page_t *pages; 482 dma_addr_t *pages_addr; 483 bool ready; 484}; 485 486int radeon_gart_table_ram_alloc(struct radeon_device *rdev); 487void radeon_gart_table_ram_free(struct radeon_device *rdev); 488int radeon_gart_table_vram_alloc(struct radeon_device *rdev); 489void radeon_gart_table_vram_free(struct radeon_device *rdev); 490int radeon_gart_table_vram_pin(struct radeon_device *rdev); 491void radeon_gart_table_vram_unpin(struct radeon_device *rdev); 492int radeon_gart_init(struct radeon_device *rdev); 493void radeon_gart_fini(struct radeon_device *rdev); 494void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, 495 int pages); 496int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, 497 int pages, vm_page_t *pagelist, 498 dma_addr_t *dma_addr); 499void radeon_gart_restore(struct radeon_device *rdev); 500 501 502/* 503 * GPU MC structures, functions & helpers 504 */ 505struct radeon_mc { 506 resource_size_t aper_size; 507 resource_size_t aper_base; 508 resource_size_t agp_base; 509 /* for some chips with <= 32MB we need to lie 510 * about vram size near mc fb location */ 511 u64 mc_vram_size; 512 u64 visible_vram_size; 513 u64 gtt_size; 514 u64 gtt_start; 515 u64 gtt_end; 516 u64 vram_start; 517 u64 vram_end; 518 unsigned vram_width; 519 u64 real_vram_size; 520 int vram_mtrr; 521 bool vram_is_ddr; 522 bool igp_sideport_enabled; 523 u64 gtt_base_align; 524}; 525 526bool radeon_combios_sideport_present(struct radeon_device *rdev); 527bool radeon_atombios_sideport_present(struct radeon_device *rdev); 528 529/* 530 * GPU scratch registers structures, functions & helpers 531 */ 532struct radeon_scratch { 533 unsigned num_reg; 534 uint32_t reg_base; 535 bool free[32]; 536 uint32_t reg[32]; 537}; 538 539int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg); 540void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); 541 542 543/* 544 * IRQS. 
545 */ 546 547struct radeon_unpin_work { 548 struct task work; 549 struct radeon_device *rdev; 550 int crtc_id; 551 struct radeon_fence *fence; 552 struct drm_pending_vblank_event *event; 553 struct radeon_bo *old_rbo; 554 u64 new_crtc_base; 555}; 556 557struct r500_irq_stat_regs { 558 u32 disp_int; 559 u32 hdmi0_status; 560}; 561 562struct r600_irq_stat_regs { 563 u32 disp_int; 564 u32 disp_int_cont; 565 u32 disp_int_cont2; 566 u32 d1grph_int; 567 u32 d2grph_int; 568 u32 hdmi0_status; 569 u32 hdmi1_status; 570}; 571 572struct evergreen_irq_stat_regs { 573 u32 disp_int; 574 u32 disp_int_cont; 575 u32 disp_int_cont2; 576 u32 disp_int_cont3; 577 u32 disp_int_cont4; 578 u32 disp_int_cont5; 579 u32 d1grph_int; 580 u32 d2grph_int; 581 u32 d3grph_int; 582 u32 d4grph_int; 583 u32 d5grph_int; 584 u32 d6grph_int; 585 u32 afmt_status1; 586 u32 afmt_status2; 587 u32 afmt_status3; 588 u32 afmt_status4; 589 u32 afmt_status5; 590 u32 afmt_status6; 591}; 592 593union radeon_irq_stat_regs { 594 struct r500_irq_stat_regs r500; 595 struct r600_irq_stat_regs r600; 596 struct evergreen_irq_stat_regs evergreen; 597}; 598 599#define RADEON_MAX_HPD_PINS 6 600#define RADEON_MAX_CRTCS 6 601#define RADEON_MAX_AFMT_BLOCKS 6 602 603struct radeon_irq { 604 bool installed; 605 struct mtx lock; 606 atomic_t ring_int[RADEON_NUM_RINGS]; 607 bool crtc_vblank_int[RADEON_MAX_CRTCS]; 608 atomic_t pflip[RADEON_MAX_CRTCS]; 609 wait_queue_head_t vblank_queue; 610 bool hpd[RADEON_MAX_HPD_PINS]; 611 bool afmt[RADEON_MAX_AFMT_BLOCKS]; 612 union radeon_irq_stat_regs stat_regs; 613}; 614 615int radeon_irq_kms_init(struct radeon_device *rdev); 616void radeon_irq_kms_fini(struct radeon_device *rdev); 617void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); 618void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); 619void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); 620void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); 621void 
radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block); 622void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block); 623void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask); 624void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); 625 626/* 627 * CP & rings. 628 */ 629 630struct radeon_ib { 631 struct radeon_sa_bo *sa_bo; 632 uint32_t length_dw; 633 uint64_t gpu_addr; 634 uint32_t *ptr; 635 int ring; 636 struct radeon_fence *fence; 637 struct radeon_vm *vm; 638 bool is_const_ib; 639 struct radeon_fence *sync_to[RADEON_NUM_RINGS]; 640 struct radeon_semaphore *semaphore; 641}; 642 643struct radeon_ring { 644 struct radeon_bo *ring_obj; 645 volatile uint32_t *ring; 646 unsigned rptr; 647 unsigned rptr_offs; 648 unsigned rptr_reg; 649 unsigned rptr_save_reg; 650 u64 next_rptr_gpu_addr; 651 volatile u32 *next_rptr_cpu_addr; 652 unsigned wptr; 653 unsigned wptr_old; 654 unsigned wptr_reg; 655 unsigned ring_size; 656 unsigned ring_free_dw; 657 int count_dw; 658 unsigned long last_activity; 659 unsigned last_rptr; 660 uint64_t gpu_addr; 661 uint32_t align_mask; 662 uint32_t ptr_mask; 663 bool ready; 664 u32 ptr_reg_shift; 665 u32 ptr_reg_mask; 666 u32 nop; 667 u32 idx; 668 u64 last_semaphore_signal_addr; 669 u64 last_semaphore_wait_addr; 670}; 671 672/* 673 * VM 674 */ 675 676/* maximum number of VMIDs */ 677#define RADEON_NUM_VM 16 678 679/* defines number of bits in page table versus page directory, 680 * a page is 4KB so we have 12 bits offset, 9 bits in the page 681 * table and the remaining 19 bits are in the page directory */ 682#define RADEON_VM_BLOCK_SIZE 9 683 684/* number of entries in page table */ 685#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) 686 687struct radeon_vm { 688 struct list_head list; 689 struct list_head va; 690 unsigned id; 691 692 /* contains the page directory */ 693 struct radeon_sa_bo *page_directory; 694 uint64_t pd_gpu_addr; 695 696 /* array of page 
tables, one for each page directory entry */ 697 struct radeon_sa_bo **page_tables; 698 699 struct sx mutex; 700 /* last fence for cs using this vm */ 701 struct radeon_fence *fence; 702 /* last flush or NULL if we still need to flush */ 703 struct radeon_fence *last_flush; 704}; 705 706struct radeon_vm_manager { 707 struct sx lock; 708 struct list_head lru_vm; 709 struct radeon_fence *active[RADEON_NUM_VM]; 710 struct radeon_sa_manager sa_manager; 711 uint32_t max_pfn; 712 /* number of VMIDs */ 713 unsigned nvm; 714 /* vram base address for page table entry */ 715 u64 vram_base_offset; 716 /* is vm enabled? */ 717 bool enabled; 718}; 719 720/* 721 * file private structure 722 */ 723struct radeon_fpriv { 724 struct radeon_vm vm; 725}; 726 727/* 728 * R6xx+ IH ring 729 */ 730struct r600_ih { 731 struct radeon_bo *ring_obj; 732 volatile uint32_t *ring; 733 unsigned rptr; 734 unsigned ring_size; 735 uint64_t gpu_addr; 736 uint32_t ptr_mask; 737 atomic_t lock; 738 bool enabled; 739}; 740 741struct r600_blit_cp_primitives { 742 void (*set_render_target)(struct radeon_device *rdev, int format, 743 int w, int h, u64 gpu_addr); 744 void (*cp_set_surface_sync)(struct radeon_device *rdev, 745 u32 sync_type, u32 size, 746 u64 mc_addr); 747 void (*set_shaders)(struct radeon_device *rdev); 748 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); 749 void (*set_tex_resource)(struct radeon_device *rdev, 750 int format, int w, int h, int pitch, 751 u64 gpu_addr, u32 size); 752 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, 753 int x2, int y2); 754 void (*draw_auto)(struct radeon_device *rdev); 755 void (*set_default_state)(struct radeon_device *rdev); 756}; 757 758struct r600_blit { 759 struct radeon_bo *shader_obj; 760 struct r600_blit_cp_primitives primitives; 761 int max_dim; 762 int ring_size_common; 763 int ring_size_per_loop; 764 u64 shader_gpu_addr; 765 u32 vs_offset, ps_offset; 766 u32 state_offset; 767 u32 state_len; 768}; 769 770/* 771 * 
SI RLC stuff 772 */ 773struct si_rlc { 774 /* for power gating */ 775 struct radeon_bo *save_restore_obj; 776 uint64_t save_restore_gpu_addr; 777 /* for clear state */ 778 struct radeon_bo *clear_state_obj; 779 uint64_t clear_state_gpu_addr; 780}; 781 782int radeon_ib_get(struct radeon_device *rdev, int ring, 783 struct radeon_ib *ib, struct radeon_vm *vm, 784 unsigned size); 785void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 786int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 787 struct radeon_ib *const_ib); 788int radeon_ib_pool_init(struct radeon_device *rdev); 789void radeon_ib_pool_fini(struct radeon_device *rdev); 790int radeon_ib_ring_tests(struct radeon_device *rdev); 791/* Ring access between begin & end cannot sleep */ 792bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, 793 struct radeon_ring *ring); 794void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); 795int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 796int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 797void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); 798void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); 799void radeon_ring_undo(struct radeon_ring *ring); 800void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); 801int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 802void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring); 803void radeon_ring_lockup_update(struct radeon_ring *ring); 804bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 805unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, 806 uint32_t **data); 807int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, 808 unsigned size, uint32_t *data); 
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);


/* r600 async dma */
void r600_dma_stop(struct radeon_device *rdev);
int r600_dma_resume(struct radeon_device *rdev);
void r600_dma_fini(struct radeon_device *rdev);

/* cayman async dma (second DMA engine) */
void cayman_dma_stop(struct radeon_device *rdev);
int cayman_dma_resume(struct radeon_device *rdev);
void cayman_dma_fini(struct radeon_device *rdev);

/*
 * CS.
 */
/* One relocation in a command stream: GEM handle resolved to a bo. */
struct radeon_cs_reloc {
	struct drm_gem_object	*gobj;
	struct radeon_bo	*robj;
	struct radeon_bo_list	lobj;
	uint32_t		handle;
	uint32_t		flags;
};

/*
 * One chunk of a submitted command stream.  Userspace data is paged in
 * two kernel pages at a time (kpage/kpage_idx) as the parser walks it.
 */
struct radeon_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;	/* length in dwords */
	int			kpage_idx[2];
	uint32_t		*kpage[2];
	uint32_t		*kdata;
	void __user		*user_ptr;
	int			last_copied_page;
	int			last_page_index;
};

/* Parser state for one command-stream submission. */
struct radeon_cs_parser {
	device_t		dev;
	struct radeon_device	*rdev;
	struct drm_file		*filp;
	/* chunks */
	unsigned		nchunks;
	struct radeon_cs_chunk	*chunks;
	uint64_t		*chunks_array;
	/* IB */
	unsigned		idx;
	/* relocations */
	unsigned		nrelocs;
	struct radeon_cs_reloc	*relocs;
	struct radeon_cs_reloc	**relocs_ptr;
	struct list_head	validated;
	unsigned		dma_reloc_idx;
	/* indices of various chunks */
	int			chunk_ib_idx;
	int			chunk_relocs_idx;
	int			chunk_flags_idx;
	int			chunk_const_ib_idx;
	struct radeon_ib	ib;
	struct radeon_ib	const_ib;
	void			*track;		/* asic-specific checker state */
	unsigned		family;
	int			parser_error;
	u32			cs_flags;
	u32			ring;		/* destination ring index */
	s32			priority;
};

extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);

/* Decoded header of one packet inside an IB. */
struct radeon_cs_packet {
	unsigned	idx;
unsigned type; 883 unsigned reg; 884 unsigned opcode; 885 int count; 886 unsigned one_reg_wr; 887}; 888 889typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p, 890 struct radeon_cs_packet *pkt, 891 unsigned idx, unsigned reg); 892typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, 893 struct radeon_cs_packet *pkt); 894 895 896/* 897 * AGP 898 */ 899int radeon_agp_init(struct radeon_device *rdev); 900void radeon_agp_resume(struct radeon_device *rdev); 901void radeon_agp_suspend(struct radeon_device *rdev); 902void radeon_agp_fini(struct radeon_device *rdev); 903 904 905/* 906 * Writeback 907 */ 908struct radeon_wb { 909 struct radeon_bo *wb_obj; 910 volatile uint32_t *wb; 911 uint64_t gpu_addr; 912 bool enabled; 913 bool use_event; 914}; 915 916#define RADEON_WB_SCRATCH_OFFSET 0 917#define RADEON_WB_RING0_NEXT_RPTR 256 918#define RADEON_WB_CP_RPTR_OFFSET 1024 919#define RADEON_WB_CP1_RPTR_OFFSET 1280 920#define RADEON_WB_CP2_RPTR_OFFSET 1536 921#define R600_WB_DMA_RPTR_OFFSET 1792 922#define R600_WB_IH_WPTR_OFFSET 2048 923#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 924#define R600_WB_EVENT_OFFSET 3072 925 926/** 927 * struct radeon_pm - power management datas 928 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) 929 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) 930 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) 931 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) 932 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) 933 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) 934 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) 935 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) 936 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) 937 * @sclk: GPU clock Mhz (core bandwidth depends of this clock) 938 * @needed_bandwidth: current bandwidth needs 939 * 940 * It keeps track of various data needed to take 
powermanagement decision. 941 * Bandwidth need is used to determine minimun clock of the GPU and memory. 942 * Equation between gpu/memory clock and available bandwidth is hw dependent 943 * (type of memory, bus size, efficiency, ...) 944 */ 945 946enum radeon_pm_method { 947 PM_METHOD_PROFILE, 948 PM_METHOD_DYNPM, 949}; 950 951enum radeon_dynpm_state { 952 DYNPM_STATE_DISABLED, 953 DYNPM_STATE_MINIMUM, 954 DYNPM_STATE_PAUSED, 955 DYNPM_STATE_ACTIVE, 956 DYNPM_STATE_SUSPENDED, 957}; 958enum radeon_dynpm_action { 959 DYNPM_ACTION_NONE, 960 DYNPM_ACTION_MINIMUM, 961 DYNPM_ACTION_DOWNCLOCK, 962 DYNPM_ACTION_UPCLOCK, 963 DYNPM_ACTION_DEFAULT 964}; 965 966enum radeon_voltage_type { 967 VOLTAGE_NONE = 0, 968 VOLTAGE_GPIO, 969 VOLTAGE_VDDC, 970 VOLTAGE_SW 971}; 972 973enum radeon_pm_state_type { 974 POWER_STATE_TYPE_DEFAULT, 975 POWER_STATE_TYPE_POWERSAVE, 976 POWER_STATE_TYPE_BATTERY, 977 POWER_STATE_TYPE_BALANCED, 978 POWER_STATE_TYPE_PERFORMANCE, 979}; 980 981enum radeon_pm_profile_type { 982 PM_PROFILE_DEFAULT, 983 PM_PROFILE_AUTO, 984 PM_PROFILE_LOW, 985 PM_PROFILE_MID, 986 PM_PROFILE_HIGH, 987}; 988 989#define PM_PROFILE_DEFAULT_IDX 0 990#define PM_PROFILE_LOW_SH_IDX 1 991#define PM_PROFILE_MID_SH_IDX 2 992#define PM_PROFILE_HIGH_SH_IDX 3 993#define PM_PROFILE_LOW_MH_IDX 4 994#define PM_PROFILE_MID_MH_IDX 5 995#define PM_PROFILE_HIGH_MH_IDX 6 996#define PM_PROFILE_MAX 7 997 998struct radeon_pm_profile { 999 int dpms_off_ps_idx; 1000 int dpms_on_ps_idx; 1001 int dpms_off_cm_idx; 1002 int dpms_on_cm_idx; 1003}; 1004 1005enum radeon_int_thermal_type { 1006 THERMAL_TYPE_NONE, 1007 THERMAL_TYPE_RV6XX, 1008 THERMAL_TYPE_RV770, 1009 THERMAL_TYPE_EVERGREEN, 1010 THERMAL_TYPE_SUMO, 1011 THERMAL_TYPE_NI, 1012 THERMAL_TYPE_SI, 1013}; 1014 1015struct radeon_voltage { 1016 enum radeon_voltage_type type; 1017 /* gpio voltage */ 1018 struct radeon_gpio_rec gpio; 1019 u32 delay; /* delay in usec from voltage drop to sclk change */ 1020 bool active_high; /* voltage drop is active 
when bit is high */ 1021 /* VDDC voltage */ 1022 u8 vddc_id; /* index into vddc voltage table */ 1023 u8 vddci_id; /* index into vddci voltage table */ 1024 bool vddci_enabled; 1025 /* r6xx+ sw */ 1026 u16 voltage; 1027 /* evergreen+ vddci */ 1028 u16 vddci; 1029}; 1030 1031/* clock mode flags */ 1032#define RADEON_PM_MODE_NO_DISPLAY (1 << 0) 1033 1034struct radeon_pm_clock_info { 1035 /* memory clock */ 1036 u32 mclk; 1037 /* engine clock */ 1038 u32 sclk; 1039 /* voltage info */ 1040 struct radeon_voltage voltage; 1041 /* standardized clock flags */ 1042 u32 flags; 1043}; 1044 1045/* state flags */ 1046#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0) 1047 1048struct radeon_power_state { 1049 enum radeon_pm_state_type type; 1050 struct radeon_pm_clock_info *clock_info; 1051 /* number of valid clock modes in this power state */ 1052 int num_clock_modes; 1053 struct radeon_pm_clock_info *default_clock_mode; 1054 /* standardized state flags */ 1055 u32 flags; 1056 u32 misc; /* vbios specific flags */ 1057 u32 misc2; /* vbios specific flags */ 1058 int pcie_lanes; /* pcie lanes */ 1059}; 1060 1061/* 1062 * Some modes are overclocked by very low value, accept them 1063 */ 1064#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */ 1065 1066struct radeon_pm { 1067 struct sx mutex; 1068 /* write locked while reprogramming mclk */ 1069 struct sx mclk_lock; 1070 u32 active_crtcs; 1071 int active_crtc_count; 1072 int req_vblank; 1073 bool vblank_sync; 1074 fixed20_12 max_bandwidth; 1075 fixed20_12 igp_sideport_mclk; 1076 fixed20_12 igp_system_mclk; 1077 fixed20_12 igp_ht_link_clk; 1078 fixed20_12 igp_ht_link_width; 1079 fixed20_12 k8_bandwidth; 1080 fixed20_12 sideport_bandwidth; 1081 fixed20_12 ht_bandwidth; 1082 fixed20_12 core_bandwidth; 1083 fixed20_12 sclk; 1084 fixed20_12 mclk; 1085 fixed20_12 needed_bandwidth; 1086 struct radeon_power_state *power_state; 1087 /* number of valid power states */ 1088 int num_power_states; 1089 int current_power_state_index; 1090 int 
current_clock_mode_index; 1091 int requested_power_state_index; 1092 int requested_clock_mode_index; 1093 int default_power_state_index; 1094 u32 current_sclk; 1095 u32 current_mclk; 1096 u16 current_vddc; 1097 u16 current_vddci; 1098 u32 default_sclk; 1099 u32 default_mclk; 1100 u16 default_vddc; 1101 u16 default_vddci; 1102 struct radeon_i2c_chan *i2c_bus; 1103 /* selected pm method */ 1104 enum radeon_pm_method pm_method; 1105 /* dynpm power management */ 1106#ifdef DUMBBELL_WIP 1107 struct delayed_work dynpm_idle_work; 1108#endif /* DUMBBELL_WIP */ 1109 enum radeon_dynpm_state dynpm_state; 1110 enum radeon_dynpm_action dynpm_planned_action; 1111 unsigned long dynpm_action_timeout; 1112 bool dynpm_can_upclock; 1113 bool dynpm_can_downclock; 1114 /* profile-based power management */ 1115 enum radeon_pm_profile_type profile; 1116 int profile_index; 1117 struct radeon_pm_profile profiles[PM_PROFILE_MAX]; 1118 /* internal thermal controller on rv6xx+ */ 1119 enum radeon_int_thermal_type int_thermal_type; 1120#ifdef DUMBBELL_WIP 1121 struct device *int_hwmon_dev; 1122#endif /* DUMBBELL_WIP */ 1123}; 1124 1125int radeon_pm_get_type_index(struct radeon_device *rdev, 1126 enum radeon_pm_state_type ps_type, 1127 int instance); 1128 1129struct r600_audio { 1130 int channels; 1131 int rate; 1132 int bits_per_sample; 1133 u8 status_bits; 1134 u8 category_code; 1135}; 1136 1137/* 1138 * Benchmarking 1139 */ 1140void radeon_benchmark(struct radeon_device *rdev, int test_number); 1141 1142 1143/* 1144 * Testing 1145 */ 1146void radeon_test_moves(struct radeon_device *rdev); 1147void radeon_test_ring_sync(struct radeon_device *rdev, 1148 struct radeon_ring *cpA, 1149 struct radeon_ring *cpB); 1150void radeon_test_syncing(struct radeon_device *rdev); 1151 1152 1153/* 1154 * Debugfs 1155 */ 1156struct radeon_debugfs { 1157 struct drm_info_list *files; 1158 unsigned num_files; 1159}; 1160 1161int radeon_debugfs_add_files(struct radeon_device *rdev, 1162 struct drm_info_list 
*files, 1163 unsigned nfiles); 1164int radeon_debugfs_fence_init(struct radeon_device *rdev); 1165 1166 1167/* 1168 * ASIC specific functions. 1169 */ 1170struct radeon_asic { 1171 int (*init)(struct radeon_device *rdev); 1172 void (*fini)(struct radeon_device *rdev); 1173 int (*resume)(struct radeon_device *rdev); 1174 int (*suspend)(struct radeon_device *rdev); 1175 void (*vga_set_state)(struct radeon_device *rdev, bool state); 1176 int (*asic_reset)(struct radeon_device *rdev); 1177 /* ioctl hw specific callback. Some hw might want to perform special 1178 * operation on specific ioctl. For instance on wait idle some hw 1179 * might want to perform and HDP flush through MMIO as it seems that 1180 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed 1181 * through ring. 1182 */ 1183 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); 1184 /* check if 3D engine is idle */ 1185 bool (*gui_idle)(struct radeon_device *rdev); 1186 /* wait for mc_idle */ 1187 int (*mc_wait_for_idle)(struct radeon_device *rdev); 1188 /* gart */ 1189 struct { 1190 void (*tlb_flush)(struct radeon_device *rdev); 1191 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); 1192 } gart; 1193 struct { 1194 int (*init)(struct radeon_device *rdev); 1195 void (*fini)(struct radeon_device *rdev); 1196 1197 u32 pt_ring_index; 1198 void (*set_page)(struct radeon_device *rdev, uint64_t pe, 1199 uint64_t addr, unsigned count, 1200 uint32_t incr, uint32_t flags); 1201 } vm; 1202 /* ring specific callbacks */ 1203 struct { 1204 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1205 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); 1206 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); 1207 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, 1208 struct radeon_semaphore *semaphore, bool emit_wait); 1209 int (*cs_parse)(struct radeon_cs_parser *p); 1210 void 
		(*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
	} ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		irqreturn_t (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
		/* set backlight level */
		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
		/* get backlight level */
		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence **fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
		void
		(*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	} pm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};

/*
 * Asic structures
 */
struct r100_asic {
	const unsigned	*reg_safe_bm;
	unsigned	reg_safe_bm_size;
	u32		hdp_cntl;
};

struct r300_asic {
	const unsigned	*reg_safe_bm;
	unsigned	reg_safe_bm_size;
	u32		resync_scratch;
	u32		hdp_cntl;
};

struct r600_asic {
	unsigned	max_pipes;
	unsigned	max_tile_pipes;
	unsigned	max_simds;
	unsigned	max_backends;
	unsigned	max_gprs;
	unsigned	max_threads;
	unsigned	max_stack_entries;
	unsigned	max_hw_contexts;
	unsigned
max_gs_threads;
	unsigned	sx_max_export_size;
	unsigned	sx_max_export_pos_size;
	unsigned	sx_max_export_smx_size;
	unsigned	sq_num_cf_insts;
	unsigned	tiling_nbanks;
	unsigned	tiling_npipes;
	unsigned	tiling_group_size;
	unsigned	tile_config;
	unsigned	backend_map;
};

struct rv770_asic {
	unsigned	max_pipes;
	unsigned	max_tile_pipes;
	unsigned	max_simds;
	unsigned	max_backends;
	unsigned	max_gprs;
	unsigned	max_threads;
	unsigned	max_stack_entries;
	unsigned	max_hw_contexts;
	unsigned	max_gs_threads;
	unsigned	sx_max_export_size;
	unsigned	sx_max_export_pos_size;
	unsigned	sx_max_export_smx_size;
	unsigned	sq_num_cf_insts;
	unsigned	sx_num_of_sets;
	unsigned	sc_prim_fifo_size;
	unsigned	sc_hiz_tile_fifo_size;
	/* NOTE(review): "fize" is a long-standing typo for "size"; the member
	 * name is kept because code outside this header references it. */
	unsigned	sc_earlyz_tile_fifo_fize;
	unsigned	tiling_nbanks;
	unsigned	tiling_npipes;
	unsigned	tiling_group_size;
	unsigned	tile_config;
	unsigned	backend_map;
};

struct evergreen_asic {
	unsigned	num_ses;
	unsigned	max_pipes;
	unsigned	max_tile_pipes;
	unsigned	max_simds;
	unsigned	max_backends;
	unsigned	max_gprs;
	unsigned	max_threads;
	unsigned	max_stack_entries;
	unsigned	max_hw_contexts;
	unsigned	max_gs_threads;
	unsigned	sx_max_export_size;
	unsigned	sx_max_export_pos_size;
	unsigned	sx_max_export_smx_size;
	unsigned	sq_num_cf_insts;
	unsigned	sx_num_of_sets;
	unsigned	sc_prim_fifo_size;
	unsigned	sc_hiz_tile_fifo_size;
	unsigned	sc_earlyz_tile_fifo_size;
	unsigned	tiling_nbanks;
	unsigned	tiling_npipes;
	unsigned	tiling_group_size;
	unsigned	tile_config;
	unsigned	backend_map;
};

struct cayman_asic {
	unsigned	max_shader_engines;
	unsigned	max_pipes_per_simd;
	unsigned	max_tile_pipes;
	unsigned	max_simds_per_se;
	unsigned	max_backends_per_se;
	unsigned
max_texture_channel_caches;
	unsigned	max_gprs;
	unsigned	max_threads;
	unsigned	max_gs_threads;
	unsigned	max_stack_entries;
	unsigned	sx_num_of_sets;
	unsigned	sx_max_export_size;
	unsigned	sx_max_export_pos_size;
	unsigned	sx_max_export_smx_size;
	unsigned	max_hw_contexts;
	unsigned	sq_num_cf_insts;
	unsigned	sc_prim_fifo_size;
	unsigned	sc_hiz_tile_fifo_size;
	unsigned	sc_earlyz_tile_fifo_size;

	unsigned	num_shader_engines;
	unsigned	num_shader_pipes_per_simd;
	unsigned	num_tile_pipes;
	unsigned	num_simds_per_se;
	unsigned	num_backends_per_se;
	unsigned	backend_disable_mask_per_asic;
	unsigned	backend_map;
	unsigned	num_texture_channel_caches;
	unsigned	mem_max_burst_length_bytes;
	unsigned	mem_row_size_in_kb;
	unsigned	shader_engine_tile_size;
	unsigned	num_gpus;
	unsigned	multi_gpu_tile_size;

	unsigned	tile_config;
};

struct si_asic {
	unsigned	max_shader_engines;
	unsigned	max_tile_pipes;
	unsigned	max_cu_per_sh;
	unsigned	max_sh_per_se;
	unsigned	max_backends_per_se;
	unsigned	max_texture_channel_caches;
	unsigned	max_gprs;
	unsigned	max_gs_threads;
	unsigned	max_hw_contexts;
	unsigned	sc_prim_fifo_size_frontend;
	unsigned	sc_prim_fifo_size_backend;
	unsigned	sc_hiz_tile_fifo_size;
	unsigned	sc_earlyz_tile_fifo_size;

	unsigned	num_tile_pipes;
	unsigned	num_backends_per_se;
	unsigned	backend_disable_mask_per_asic;
	unsigned	backend_map;
	unsigned	num_texture_channel_caches;
	unsigned	mem_max_burst_length_bytes;
	unsigned	mem_row_size_in_kb;
	unsigned	shader_engine_tile_size;
	unsigned	num_gpus;
	unsigned	multi_gpu_tile_size;

	unsigned	tile_config;
};

/* One per-family configuration blob; only the member matching the probed
 * chip family is ever used. */
union radeon_asic_config {
	struct r300_asic	r300;
	struct r100_asic	r100;
	struct r600_asic	r600;
	struct rv770_asic	rv770;
	struct
evergreen_asic	evergreen;
	struct cayman_asic	cayman;
	struct si_asic		si;
};

/*
 * asic initialization from radeon_asic.c
 */
int radeon_asic_init(struct radeon_device *rdev);


/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
	struct radeon_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * ACPI
 */
struct radeon_atif_notification_cfg {
	bool enabled;
	int command_code;
};

/* Which ACPI ATIF notifications the platform BIOS supports. */
struct radeon_atif_notifications {
	bool display_switch;
	bool
expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

/* Which ACPI ATIF functions the platform BIOS implements. */
struct radeon_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct radeon_atif {
	struct radeon_atif_notifications notifications;
	struct radeon_atif_functions functions;
	struct radeon_atif_notification_cfg notification_cfg;
	struct radeon_encoder *encoder_for_bl;
};

/* Which ACPI ATCS functions the platform BIOS implements. */
struct radeon_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct radeon_atcs {
	struct radeon_atcs_functions functions;
};

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

/* Central per-GPU device state; one instance per probed adapter. */
struct radeon_device {
	device_t			dev;
	struct drm_device		*ddev;
	struct sx			exclusive_lock;
	/* ASIC */
	union radeon_asic_config	config;
	enum radeon_family		family;
	unsigned long			flags;
	int				usec_timeout;
	enum radeon_pll_errata		pll_errata;
	int				num_gb_pipes;
	int				num_z_pipes;
	int				disp_priority;
	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	/* NOTE(review): "stollen" is an upstream typo for "stolen"; kept. */
	struct radeon_bo		*stollen_vga_memory;
	/* Register mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	/* protects concurrent MM_INDEX/DATA based register access */
	struct mtx			mmio_idx_lock;
	int				rmmio_rid;
	struct resource			*rmmio;
	radeon_rreg_t			mc_rreg;
	radeon_wreg_t			mc_wreg;
	radeon_rreg_t			pll_rreg;
	radeon_wreg_t			pll_wreg;
	uint32_t			pcie_reg_mask;
	radeon_rreg_t			pciep_rreg;
	radeon_wreg_t			pciep_wreg;
	/* io port */
	int				rio_rid;
	struct resource			*rio_mem;
	resource_size_t			rio_mem_size;
	struct radeon_clock		clock;
	struct radeon_mc		mc;
	struct radeon_gart		gart;
	struct radeon_mode_info		mode_info;
	struct radeon_scratch		scratch;
	struct radeon_mman		mman;
	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
	struct cv			fence_queue;
	struct mtx			fence_queue_mtx;
	struct sx			ring_lock;
	struct radeon_ring		ring[RADEON_NUM_RINGS];
	bool				ib_pool_ready;
	struct radeon_sa_manager	ring_tmp_bo;
	struct radeon_irq		irq;
	struct radeon_asic		*asic;
	struct radeon_gem		gem;
	struct radeon_pm		pm;
	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct radeon_wb		wb;
	struct radeon_dummy_page	dummy_page;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	bool
fictitious_range_registered;
	struct radeon_surface_reg	surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware		*me_fw;	/* all family ME firmware */
	const struct firmware		*pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware		*rlc_fw;	/* r6/700 RLC firmware */
	const struct firmware		*mc_fw;	/* NI MC firmware */
	const struct firmware		*ce_fw;	/* SI CE firmware */
	struct r600_blit		r600_blit;
	struct r600_vram_scratch	vram_scratch;
	int				msi_enabled;	/* msi enabled */
	struct r600_ih			ih;	/* r6/700 interrupt ring */
	struct si_rlc			rlc;
	struct taskqueue		*tq;
	struct task			hotplug_work;
	struct task			audio_work;
	int				num_crtc;	/* number of crtcs */
	struct sx			dc_hw_i2c_mutex;	/* display controller hw i2c mutex */
	bool				audio_enabled;
	struct r600_audio		audio_status;	/* audio stuff */
	struct {
		ACPI_HANDLE		handle;
		ACPI_NOTIFY_HANDLER	notifier_call;
	} acpi;
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file			*hyperz_filp;
	struct drm_file			*cmask_filp;
	/* i2c buses */
	struct radeon_i2c_chan		*i2c_bus[RADEON_MAX_I2C_BUS];
	/* debugfs */
	struct radeon_debugfs		debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
	/* virtual memory */
	struct radeon_vm_manager	vm_manager;
	struct sx			gpu_clock_mutex;
	/* ACPI interface */
	struct radeon_atif		atif;
	struct radeon_atcs		atcs;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void
r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1671 1672/* 1673 * Cast helper 1674 */ 1675#define to_radeon_fence(p) ((struct radeon_fence *)(p)) 1676 1677/* 1678 * Registers read & write functions. 1679 */ 1680#define RREG8(reg) bus_read_1((rdev->rmmio), (reg)) 1681#define WREG8(reg, v) bus_write_1((rdev->rmmio), (reg), v) 1682#define RREG16(reg) bus_read_2((rdev->rmmio), (reg)) 1683#define WREG16(reg, v) bus_write_2((rdev->rmmio), (reg), v) 1684#define RREG32(reg) r100_mm_rreg(rdev, (reg), false) 1685#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true) 1686#define DREG32(reg) DRM_INFO("REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1687#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false) 1688#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true) 1689#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1690#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1691#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) 1692#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) 1693#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) 1694#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1695#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1696#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1697#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg)) 1698#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) 1699#define WREG32_P(reg, val, mask) \ 1700 do { \ 1701 uint32_t tmp_ = RREG32(reg); \ 1702 tmp_ &= (mask); \ 1703 tmp_ |= ((val) & ~(mask)); \ 1704 WREG32(reg, tmp_); \ 1705 } while (0) 1706#define WREG32_PLL_P(reg, val, mask) \ 1707 do { \ 1708 uint32_t tmp_ = RREG32_PLL(reg); \ 1709 tmp_ &= (mask); \ 1710 tmp_ |= ((val) & ~(mask)); \ 1711 WREG32_PLL(reg, tmp_); \ 1712 } while (0) 1713#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false)) 1714#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 1715#define 
WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 1716 1717/* 1718 * Indirect registers accessor 1719 */ 1720static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 1721{ 1722 uint32_t r; 1723 1724 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 1725 r = RREG32(RADEON_PCIE_DATA); 1726 return r; 1727} 1728 1729static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1730{ 1731 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 1732 WREG32(RADEON_PCIE_DATA, (v)); 1733} 1734 1735void r100_pll_errata_after_index(struct radeon_device *rdev); 1736 1737 1738/* 1739 * ASICs helpers. 1740 */ 1741#define ASIC_IS_RN50(rdev) ((rdev->ddev->pci_device == 0x515e) || \ 1742 (rdev->ddev->pci_device == 0x5969)) 1743#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ 1744 (rdev->family == CHIP_RV200) || \ 1745 (rdev->family == CHIP_RS100) || \ 1746 (rdev->family == CHIP_RS200) || \ 1747 (rdev->family == CHIP_RV250) || \ 1748 (rdev->family == CHIP_RV280) || \ 1749 (rdev->family == CHIP_RS300)) 1750#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \ 1751 (rdev->family == CHIP_RV350) || \ 1752 (rdev->family == CHIP_R350) || \ 1753 (rdev->family == CHIP_RV380) || \ 1754 (rdev->family == CHIP_R420) || \ 1755 (rdev->family == CHIP_R423) || \ 1756 (rdev->family == CHIP_RV410) || \ 1757 (rdev->family == CHIP_RS400) || \ 1758 (rdev->family == CHIP_RS480)) 1759#define ASIC_IS_X2(rdev) ((rdev->ddev->pci_device == 0x9441) || \ 1760 (rdev->ddev->pci_device == 0x9443) || \ 1761 (rdev->ddev->pci_device == 0x944B) || \ 1762 (rdev->ddev->pci_device == 0x9506) || \ 1763 (rdev->ddev->pci_device == 0x9509) || \ 1764 (rdev->ddev->pci_device == 0x950F) || \ 1765 (rdev->ddev->pci_device == 0x689C) || \ 1766 (rdev->ddev->pci_device == 0x689D)) 1767#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1768#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ 1769 (rdev->family == CHIP_RS690) || \ 1770 
			    (rdev->family == CHIP_RS740)  || \
			    (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
			     (rdev->flags & RADEON_IS_IGP))

/*
 * BIOS helpers.
 *
 * Little-endian reads from the BIOS image copied into rdev->bios.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);


/*
 * RING helpers.
 */
#if !defined(DRM_DEBUG_CODE) || DRM_DEBUG_CODE == 0
/* Append one dword at the write pointer, wrapping via ptr_mask, and
 * decrement the remaining-dword counters. */
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif

/*
 * ASICs macro.
 */
/* Thin wrappers that dispatch through the per-family radeon_asic table. */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))

/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32
domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);

/*
 * vm
 */
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo);
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t offset,
			  uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va);

/* audio */
void r600_audio_update_hdmi(void *arg, int pending);

/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);

/*
 * r600 cs checking helper
 */
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);

/*
 * r600 functions used by radeon_encoder.c
 */
/* N/CTS pairs for one pixel clock, used for HDMI audio clock regeneration. */
struct radeon_hdmi_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);

extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
				     u32 tiling_pipe_num,
				     u32 max_rb_num,
				     u32 total_max_rb_num,
				     u32 enabled_rb_mask);

/*
 * evergreen functions used by radeon_encoder.c
 */

extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);

extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
extern void ni_fini_microcode(struct radeon_device *rdev);

/* radeon_acpi.c */
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);

/* Prototypes added by @dumbbell.
 */

/* atombios_encoders.c */
void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
				struct drm_connector *drm_connector);
void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
			     uint32_t supported_device, u16 caps);

/* radeon_atombios.c */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
				struct drm_display_mode *mode);

/* radeon_combios.c */
void radeon_combios_connected_scratch_regs(struct drm_connector *connector,
					   struct drm_encoder *encoder, bool connected);

/* radeon_connectors.c */
void radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
					    struct drm_encoder *encoder, bool connected);
void radeon_add_legacy_connector(struct drm_device *dev,
				 uint32_t connector_id,
				 uint32_t supported_device,
				 int connector_type,
				 struct radeon_i2c_bus_rec *i2c_bus,
				 uint16_t connector_object_id,
				 struct radeon_hpd *hpd);
void radeon_add_atom_connector(struct drm_device *dev,
			       uint32_t connector_id,
			       uint32_t supported_device,
			       int connector_type,
			       struct radeon_i2c_bus_rec *i2c_bus,
			       uint32_t igp_lane_info,
			       uint16_t connector_object_id,
			       struct radeon_hpd *hpd,
			       struct radeon_router *router);

/* radeon_encoders.c */
uint32_t radeon_get_encoder_enum(struct drm_device *dev,
				 uint32_t supported_device, uint8_t dac);
void radeon_link_encoder_connector(struct drm_device *dev);

/* radeon_legacy_encoders.c */
void radeon_add_legacy_encoder(struct drm_device *dev,
			       uint32_t encoder_enum, uint32_t supported_device);
void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
				  struct drm_connector *drm_connector);

/* radeon_pm.c */
void radeon_pm_acpi_event_handler(struct radeon_device *rdev);

/* radeon_ttm.c */
int radeon_ttm_init(struct radeon_device *rdev);
void
radeon_ttm_fini(struct radeon_device *rdev);