31 32#include <dev/drm2/drmP.h> 33#include <dev/drm2/drm_crtc_helper.h> 34#include <dev/drm2/radeon/radeon_drm.h> 35#include "radeon_reg.h" 36#include "radeon.h" 37#include "atom.h" 38 39static const char radeon_family_name[][16] = { 40 "R100", 41 "RV100", 42 "RS100", 43 "RV200", 44 "RS200", 45 "R200", 46 "RV250", 47 "RS300", 48 "RV280", 49 "R300", 50 "R350", 51 "RV350", 52 "RV380", 53 "R420", 54 "R423", 55 "RV410", 56 "RS400", 57 "RS480", 58 "RS600", 59 "RS690", 60 "RS740", 61 "RV515", 62 "R520", 63 "RV530", 64 "RV560", 65 "RV570", 66 "R580", 67 "R600", 68 "RV610", 69 "RV630", 70 "RV670", 71 "RV620", 72 "RV635", 73 "RS780", 74 "RS880", 75 "RV770", 76 "RV730", 77 "RV710", 78 "RV740", 79 "CEDAR", 80 "REDWOOD", 81 "JUNIPER", 82 "CYPRESS", 83 "HEMLOCK", 84 "PALM", 85 "SUMO", 86 "SUMO2", 87 "BARTS", 88 "TURKS", 89 "CAICOS", 90 "CAYMAN", 91 "ARUBA", 92 "TAHITI", 93 "PITCAIRN", 94 "VERDE", 95 "LAST", 96}; 97 98/** 99 * radeon_surface_init - Clear GPU surface registers. 100 * 101 * @rdev: radeon_device pointer 102 * 103 * Clear GPU surface registers (r1xx-r5xx). 104 */ 105void radeon_surface_init(struct radeon_device *rdev) 106{ 107 /* FIXME: check this out */ 108 if (rdev->family < CHIP_R600) { 109 int i; 110 111 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 112 if (rdev->surface_regs[i].bo) 113 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 114 else 115 radeon_clear_surface_reg(rdev, i); 116 } 117 /* enable surfaces */ 118 WREG32(RADEON_SURFACE_CNTL, 0); 119 } 120} 121 122/* 123 * GPU scratch registers helpers function. 124 */ 125/** 126 * radeon_scratch_init - Init scratch register driver information. 
127 * 128 * @rdev: radeon_device pointer 129 * 130 * Init CP scratch register driver information (r1xx-r5xx) 131 */ 132void radeon_scratch_init(struct radeon_device *rdev) 133{ 134 int i; 135 136 /* FIXME: check this out */ 137 if (rdev->family < CHIP_R300) { 138 rdev->scratch.num_reg = 5; 139 } else { 140 rdev->scratch.num_reg = 7; 141 } 142 rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 143 for (i = 0; i < rdev->scratch.num_reg; i++) { 144 rdev->scratch.free[i] = true; 145 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 146 } 147} 148 149/** 150 * radeon_scratch_get - Allocate a scratch register 151 * 152 * @rdev: radeon_device pointer 153 * @reg: scratch register mmio offset 154 * 155 * Allocate a CP scratch register for use by the driver (all asics). 156 * Returns 0 on success or -EINVAL on failure. 157 */ 158int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 159{ 160 int i; 161 162 for (i = 0; i < rdev->scratch.num_reg; i++) { 163 if (rdev->scratch.free[i]) { 164 rdev->scratch.free[i] = false; 165 *reg = rdev->scratch.reg[i]; 166 return 0; 167 } 168 } 169 return -EINVAL; 170} 171 172/** 173 * radeon_scratch_free - Free a scratch register 174 * 175 * @rdev: radeon_device pointer 176 * @reg: scratch register mmio offset 177 * 178 * Free a CP scratch register allocated for use by the driver (all asics) 179 */ 180void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 181{ 182 int i; 183 184 for (i = 0; i < rdev->scratch.num_reg; i++) { 185 if (rdev->scratch.reg[i] == reg) { 186 rdev->scratch.free[i] = true; 187 return; 188 } 189 } 190} 191 192/* 193 * radeon_wb_*() 194 * Writeback is the the method by which the the GPU updates special pages 195 * in memory with the status of certain GPU events (fences, ring pointers, 196 * etc.). 197 */ 198 199/** 200 * radeon_wb_disable - Disable Writeback 201 * 202 * @rdev: radeon_device pointer 203 * 204 * Disables Writeback (all asics). Used for suspend. 
205 */ 206void radeon_wb_disable(struct radeon_device *rdev) 207{ 208 int r; 209 210 if (rdev->wb.wb_obj) { 211 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 212 if (unlikely(r != 0)) 213 return; 214 radeon_bo_kunmap(rdev->wb.wb_obj); 215 radeon_bo_unpin(rdev->wb.wb_obj); 216 radeon_bo_unreserve(rdev->wb.wb_obj); 217 } 218 rdev->wb.enabled = false; 219} 220 221/** 222 * radeon_wb_fini - Disable Writeback and free memory 223 * 224 * @rdev: radeon_device pointer 225 * 226 * Disables Writeback and frees the Writeback memory (all asics). 227 * Used at driver shutdown. 228 */ 229void radeon_wb_fini(struct radeon_device *rdev) 230{ 231 radeon_wb_disable(rdev); 232 if (rdev->wb.wb_obj) { 233 radeon_bo_unref(&rdev->wb.wb_obj); 234 rdev->wb.wb = NULL; 235 rdev->wb.wb_obj = NULL; 236 } 237} 238 239/** 240 * radeon_wb_init- Init Writeback driver info and allocate memory 241 * 242 * @rdev: radeon_device pointer 243 * 244 * Disables Writeback and frees the Writeback memory (all asics). 245 * Used at driver startup. 246 * Returns 0 on success or an -error on failure. 
247 */ 248int radeon_wb_init(struct radeon_device *rdev) 249{ 250 int r; 251 void *wb_ptr; 252 253 if (rdev->wb.wb_obj == NULL) { 254 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 255 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj); 256 if (r) { 257 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 258 return r; 259 } 260 } 261 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 262 if (unlikely(r != 0)) { 263 radeon_wb_fini(rdev); 264 return r; 265 } 266 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 267 &rdev->wb.gpu_addr); 268 if (r) { 269 radeon_bo_unreserve(rdev->wb.wb_obj); 270 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 271 radeon_wb_fini(rdev); 272 return r; 273 } 274 wb_ptr = &rdev->wb.wb; 275 r = radeon_bo_kmap(rdev->wb.wb_obj, wb_ptr); 276 radeon_bo_unreserve(rdev->wb.wb_obj); 277 if (r) { 278 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); 279 radeon_wb_fini(rdev); 280 return r; 281 } 282 283 /* clear wb memory */ 284 memset(*(void **)wb_ptr, 0, RADEON_GPU_PAGE_SIZE); 285 /* disable event_write fences */ 286 rdev->wb.use_event = false; 287 /* disabled via module param */ 288 if (radeon_no_wb == 1) { 289 rdev->wb.enabled = false; 290 } else { 291 if (rdev->flags & RADEON_IS_AGP) { 292 /* often unreliable on AGP */ 293 rdev->wb.enabled = false; 294 } else if (rdev->family < CHIP_R300) { 295 /* often unreliable on pre-r300 */ 296 rdev->wb.enabled = false; 297 } else { 298 rdev->wb.enabled = true; 299 /* event_write fences are only available on r600+ */ 300 if (rdev->family >= CHIP_R600) { 301 rdev->wb.use_event = true; 302 } 303 } 304 } 305 /* always use writeback/events on NI, APUs */ 306 if (rdev->family >= CHIP_PALM) { 307 rdev->wb.enabled = true; 308 rdev->wb.use_event = true; 309 } 310 311 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? 
"en" : "dis"); 312 313 return 0; 314} 315 316/** 317 * radeon_vram_location - try to find VRAM location 318 * @rdev: radeon device structure holding all necessary informations 319 * @mc: memory controller structure holding memory informations 320 * @base: base address at which to put VRAM 321 * 322 * Function will place try to place VRAM at base address provided 323 * as parameter (which is so far either PCI aperture address or 324 * for IGP TOM base address). 325 * 326 * If there is not enough space to fit the unvisible VRAM in the 32bits 327 * address space then we limit the VRAM size to the aperture. 328 * 329 * If we are using AGP and if the AGP aperture doesn't allow us to have 330 * room for all the VRAM than we restrict the VRAM to the PCI aperture 331 * size and print a warning. 332 * 333 * This function will never fails, worst case are limiting VRAM. 334 * 335 * Note: GTT start, end, size should be initialized before calling this 336 * function on AGP platform. 337 * 338 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, 339 * this shouldn't be a problem as we are using the PCI aperture as a reference. 340 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but 341 * not IGP. 342 * 343 * Note: we use mc_vram_size as on some board we need to program the mc to 344 * cover the whole aperture even if VRAM size is inferior to aperture size 345 * Novell bug 204882 + along with lots of ubuntu ones 346 * 347 * Note: when limiting vram it's safe to overwritte real_vram_size because 348 * we are not in case where real_vram_size is inferior to mc_vram_size (ie 349 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu 350 * ones) 351 * 352 * Note: IGP TOM addr should be the same as the aperture addr, we don't 353 * explicitly check for that thought. 354 * 355 * FIXME: when reducing VRAM size align new size on power of 2. 
356 */ 357void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) 358{ 359 uint64_t limit = (uint64_t)radeon_vram_limit << 20; 360 361 mc->vram_start = base; 362 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { 363 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 364 mc->real_vram_size = mc->aper_size; 365 mc->mc_vram_size = mc->aper_size; 366 } 367 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 368 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { 369 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 370 mc->real_vram_size = mc->aper_size; 371 mc->mc_vram_size = mc->aper_size; 372 } 373 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 374 if (limit && limit < mc->real_vram_size) 375 mc->real_vram_size = limit; 376 dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n", 377 (uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start, 378 (uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20); 379} 380 381/** 382 * radeon_gtt_location - try to find GTT location 383 * @rdev: radeon device structure holding all necessary informations 384 * @mc: memory controller structure holding memory informations 385 * 386 * Function will place try to place GTT before or after VRAM. 387 * 388 * If GTT size is bigger than space left then we ajust GTT size. 389 * Thus function will never fails. 390 * 391 * FIXME: when reducing GTT size align new size on power of 2. 
392 */ 393void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 394{ 395 u64 size_af, size_bf; 396 397 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; 398 size_bf = mc->vram_start & ~mc->gtt_base_align; 399 if (size_bf > size_af) { 400 if (mc->gtt_size > size_bf) { 401 dev_warn(rdev->dev, "limiting GTT\n"); 402 mc->gtt_size = size_bf; 403 } 404 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; 405 } else { 406 if (mc->gtt_size > size_af) { 407 dev_warn(rdev->dev, "limiting GTT\n"); 408 mc->gtt_size = size_af; 409 } 410 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; 411 } 412 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; 413 dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n", 414 (uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end); 415} 416 417/* 418 * GPU helpers function. 419 */ 420/** 421 * radeon_card_posted - check if the hw has already been initialized 422 * 423 * @rdev: radeon_device pointer 424 * 425 * Check if the asic has been initialized (all asics). 426 * Used at driver startup. 427 * Returns true if initialized or false if not. 
428 */ 429bool radeon_card_posted(struct radeon_device *rdev) 430{ 431 uint32_t reg; 432 433#ifdef DUMBBELL_WIP 434 if (efi_enabled(EFI_BOOT) && 435 rdev->dev->pci_subvendor == PCI_VENDOR_ID_APPLE) 436 return false; 437#endif /* DUMBBELL_WIP */ 438 439 /* first check CRTCs */ 440 if (ASIC_IS_DCE41(rdev)) { 441 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 442 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 443 if (reg & EVERGREEN_CRTC_MASTER_EN) 444 return true; 445 } else if (ASIC_IS_DCE4(rdev)) { 446 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 447 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | 448 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 449 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | 450 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | 451 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 452 if (reg & EVERGREEN_CRTC_MASTER_EN) 453 return true; 454 } else if (ASIC_IS_AVIVO(rdev)) { 455 reg = RREG32(AVIVO_D1CRTC_CONTROL) | 456 RREG32(AVIVO_D2CRTC_CONTROL); 457 if (reg & AVIVO_CRTC_EN) { 458 return true; 459 } 460 } else { 461 reg = RREG32(RADEON_CRTC_GEN_CNTL) | 462 RREG32(RADEON_CRTC2_GEN_CNTL); 463 if (reg & RADEON_CRTC_EN) { 464 return true; 465 } 466 } 467 468 /* then check MEM_SIZE, in case the crtcs are off */ 469 if (rdev->family >= CHIP_R600) 470 reg = RREG32(R600_CONFIG_MEMSIZE); 471 else 472 reg = RREG32(RADEON_CONFIG_MEMSIZE); 473 474 if (reg) 475 return true; 476 477 return false; 478 479} 480 481/** 482 * radeon_update_bandwidth_info - update display bandwidth params 483 * 484 * @rdev: radeon_device pointer 485 * 486 * Used when sclk/mclk are switched or display modes are set. 
487 * params are used to calculate display watermarks (all asics) 488 */ 489void radeon_update_bandwidth_info(struct radeon_device *rdev) 490{ 491 fixed20_12 a; 492 u32 sclk = rdev->pm.current_sclk; 493 u32 mclk = rdev->pm.current_mclk; 494 495 /* sclk/mclk in Mhz */ 496 a.full = dfixed_const(100); 497 rdev->pm.sclk.full = dfixed_const(sclk); 498 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); 499 rdev->pm.mclk.full = dfixed_const(mclk); 500 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); 501 502 if (rdev->flags & RADEON_IS_IGP) { 503 a.full = dfixed_const(16); 504 /* core_bandwidth = sclk(Mhz) * 16 */ 505 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 506 } 507} 508 509/** 510 * radeon_boot_test_post_card - check and possibly initialize the hw 511 * 512 * @rdev: radeon_device pointer 513 * 514 * Check if the asic is initialized and if not, attempt to initialize 515 * it (all asics). 516 * Returns true if initialized or false if not. 517 */ 518bool radeon_boot_test_post_card(struct radeon_device *rdev) 519{ 520 if (radeon_card_posted(rdev)) 521 return true; 522 523 if (rdev->bios) { 524 DRM_INFO("GPU not posted. posting now...\n"); 525 if (rdev->is_atom_bios) 526 atom_asic_init(rdev->mode_info.atom_context); 527 else 528 radeon_combios_asic_init(rdev->ddev); 529 return true; 530 } else { 531 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 532 return false; 533 } 534} 535 536/** 537 * radeon_dummy_page_init - init dummy page used by the driver 538 * 539 * @rdev: radeon_device pointer 540 * 541 * Allocate the dummy page used by the driver (all asics). 542 * This dummy page is used by the driver as a filler for gart entries 543 * when pages are taken out of the GART 544 * Returns 0 on sucess, -ENOMEM on failure. 
545 */ 546int radeon_dummy_page_init(struct radeon_device *rdev) 547{ 548 if (rdev->dummy_page.dmah) 549 return 0; 550 rdev->dummy_page.dmah = drm_pci_alloc(rdev->ddev, 551 PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXSIZE_32BIT); 552 if (rdev->dummy_page.dmah == NULL) 553 return -ENOMEM; 554 rdev->dummy_page.addr = rdev->dummy_page.dmah->busaddr; 555 return 0; 556} 557 558/** 559 * radeon_dummy_page_fini - free dummy page used by the driver 560 * 561 * @rdev: radeon_device pointer 562 * 563 * Frees the dummy page used by the driver (all asics). 564 */ 565void radeon_dummy_page_fini(struct radeon_device *rdev) 566{ 567 if (rdev->dummy_page.dmah == NULL) 568 return; 569 drm_pci_free(rdev->ddev, rdev->dummy_page.dmah); 570 rdev->dummy_page.dmah = NULL; 571 rdev->dummy_page.addr = 0; 572} 573 574 575/* ATOM accessor methods */ 576/* 577 * ATOM is an interpreted byte code stored in tables in the vbios. The 578 * driver registers callbacks to access registers and the interpreter 579 * in the driver parses the tables and executes then to program specific 580 * actions (set display modes, asic init, etc.). See radeon_atombios.c, 581 * atombios.h, and atom.c 582 */ 583 584/** 585 * cail_pll_read - read PLL register 586 * 587 * @info: atom card_info pointer 588 * @reg: PLL register offset 589 * 590 * Provides a PLL register accessor for the atom interpreter (r4xx+). 591 * Returns the value of the PLL register. 592 */ 593static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 594{ 595 struct radeon_device *rdev = info->dev->dev_private; 596 uint32_t r; 597 598 r = rdev->pll_rreg(rdev, reg); 599 return r; 600} 601 602/** 603 * cail_pll_write - write PLL register 604 * 605 * @info: atom card_info pointer 606 * @reg: PLL register offset 607 * @val: value to write to the pll register 608 * 609 * Provides a PLL register accessor for the atom interpreter (r4xx+). 
610 */ 611static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) 612{ 613 struct radeon_device *rdev = info->dev->dev_private; 614 615 rdev->pll_wreg(rdev, reg, val); 616} 617 618/** 619 * cail_mc_read - read MC (Memory Controller) register 620 * 621 * @info: atom card_info pointer 622 * @reg: MC register offset 623 * 624 * Provides an MC register accessor for the atom interpreter (r4xx+). 625 * Returns the value of the MC register. 626 */ 627static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) 628{ 629 struct radeon_device *rdev = info->dev->dev_private; 630 uint32_t r; 631 632 r = rdev->mc_rreg(rdev, reg); 633 return r; 634} 635 636/** 637 * cail_mc_write - write MC (Memory Controller) register 638 * 639 * @info: atom card_info pointer 640 * @reg: MC register offset 641 * @val: value to write to the pll register 642 * 643 * Provides a MC register accessor for the atom interpreter (r4xx+). 644 */ 645static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) 646{ 647 struct radeon_device *rdev = info->dev->dev_private; 648 649 rdev->mc_wreg(rdev, reg, val); 650} 651 652/** 653 * cail_reg_write - write MMIO register 654 * 655 * @info: atom card_info pointer 656 * @reg: MMIO register offset 657 * @val: value to write to the pll register 658 * 659 * Provides a MMIO register accessor for the atom interpreter (r4xx+). 660 */ 661static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) 662{ 663 struct radeon_device *rdev = info->dev->dev_private; 664 665 WREG32(reg*4, val); 666} 667 668/** 669 * cail_reg_read - read MMIO register 670 * 671 * @info: atom card_info pointer 672 * @reg: MMIO register offset 673 * 674 * Provides an MMIO register accessor for the atom interpreter (r4xx+). 675 * Returns the value of the MMIO register. 
676 */ 677static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) 678{ 679 struct radeon_device *rdev = info->dev->dev_private; 680 uint32_t r; 681 682 r = RREG32(reg*4); 683 return r; 684} 685 686/** 687 * cail_ioreg_write - write IO register 688 * 689 * @info: atom card_info pointer 690 * @reg: IO register offset 691 * @val: value to write to the pll register 692 * 693 * Provides a IO register accessor for the atom interpreter (r4xx+). 694 */ 695static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) 696{ 697 struct radeon_device *rdev = info->dev->dev_private; 698 699 WREG32_IO(reg*4, val); 700} 701 702/** 703 * cail_ioreg_read - read IO register 704 * 705 * @info: atom card_info pointer 706 * @reg: IO register offset 707 * 708 * Provides an IO register accessor for the atom interpreter (r4xx+). 709 * Returns the value of the IO register. 710 */ 711static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) 712{ 713 struct radeon_device *rdev = info->dev->dev_private; 714 uint32_t r; 715 716 r = RREG32_IO(reg*4); 717 return r; 718} 719 720/** 721 * radeon_atombios_init - init the driver info and callbacks for atombios 722 * 723 * @rdev: radeon_device pointer 724 * 725 * Initializes the driver info and register access callbacks for the 726 * ATOM interpreter (r4xx+). 727 * Returns 0 on sucess, -ENOMEM on failure. 728 * Called at driver startup. 
729 */ 730int radeon_atombios_init(struct radeon_device *rdev) 731{ 732 struct card_info *atom_card_info = 733 malloc(sizeof(struct card_info), 734 DRM_MEM_DRIVER, M_ZERO | M_WAITOK); 735 736 if (!atom_card_info) 737 return -ENOMEM; 738 739 rdev->mode_info.atom_card_info = atom_card_info; 740 atom_card_info->dev = rdev->ddev; 741 atom_card_info->reg_read = cail_reg_read; 742 atom_card_info->reg_write = cail_reg_write; 743 /* needed for iio ops */ 744 if (rdev->rio_mem) { 745 atom_card_info->ioreg_read = cail_ioreg_read; 746 atom_card_info->ioreg_write = cail_ioreg_write; 747 } else { 748 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); 749 atom_card_info->ioreg_read = cail_reg_read; 750 atom_card_info->ioreg_write = cail_reg_write; 751 } 752 atom_card_info->mc_read = cail_mc_read; 753 atom_card_info->mc_write = cail_mc_write; 754 atom_card_info->pll_read = cail_pll_read; 755 atom_card_info->pll_write = cail_pll_write; 756 757 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 758 sx_init(&rdev->mode_info.atom_context->mutex, 759 "drm__radeon_device__mode_info__atom_context__mutex"); 760 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 761 atom_allocate_fb_scratch(rdev->mode_info.atom_context); 762 return 0; 763} 764 765/** 766 * radeon_atombios_fini - free the driver info and callbacks for atombios 767 * 768 * @rdev: radeon_device pointer 769 * 770 * Frees the driver info and register access callbacks for the ATOM 771 * interpreter (r4xx+). 772 * Called at driver shutdown. 773 */ 774void radeon_atombios_fini(struct radeon_device *rdev) 775{ 776 if (rdev->mode_info.atom_context) { 777 free(rdev->mode_info.atom_context->scratch, DRM_MEM_DRIVER); 778 atom_destroy(rdev->mode_info.atom_context); 779 } 780 free(rdev->mode_info.atom_card_info, DRM_MEM_DRIVER); 781} 782 783/* COMBIOS */ 784/* 785 * COMBIOS is the bios format prior to ATOM. It provides 786 * command tables similar to ATOM, but doesn't have a unified 787 * parser. 
See radeon_combios.c 788 */ 789 790/** 791 * radeon_combios_init - init the driver info for combios 792 * 793 * @rdev: radeon_device pointer 794 * 795 * Initializes the driver info for combios (r1xx-r3xx). 796 * Returns 0 on sucess. 797 * Called at driver startup. 798 */ 799int radeon_combios_init(struct radeon_device *rdev) 800{ 801 radeon_combios_initialize_bios_scratch_regs(rdev->ddev); 802 return 0; 803} 804 805/** 806 * radeon_combios_fini - free the driver info for combios 807 * 808 * @rdev: radeon_device pointer 809 * 810 * Frees the driver info for combios (r1xx-r3xx). 811 * Called at driver shutdown. 812 */ 813void radeon_combios_fini(struct radeon_device *rdev) 814{ 815} 816 817#ifdef DUMBBELL_WIP 818/* if we get transitioned to only one device, take VGA back */ 819/** 820 * radeon_vga_set_decode - enable/disable vga decode 821 * 822 * @cookie: radeon_device pointer 823 * @state: enable/disable vga decode 824 * 825 * Enable/disable vga decode (all asics). 826 * Returns VGA resource flags. 827 */ 828static unsigned int radeon_vga_set_decode(void *cookie, bool state) 829{ 830 struct radeon_device *rdev = cookie; 831 radeon_vga_set_state(rdev, state); 832 if (state) 833 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 834 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 835 else 836 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 837} 838#endif /* DUMBBELL_WIP */ 839 840/** 841 * radeon_check_pot_argument - check that argument is a power of two 842 * 843 * @arg: value to check 844 * 845 * Validates that a certain argument is a power of two (all asics). 846 * Returns true if argument is valid. 847 */ 848static bool radeon_check_pot_argument(int arg) 849{ 850 return (arg & (arg - 1)) == 0; 851} 852 853/** 854 * radeon_check_arguments - validate module params 855 * 856 * @rdev: radeon_device pointer 857 * 858 * Validates certain module parameters and updates 859 * the associated values used by the driver (all asics). 
860 */ 861static void radeon_check_arguments(struct radeon_device *rdev) 862{ 863 /* vramlimit must be a power of two */ 864 if (!radeon_check_pot_argument(radeon_vram_limit)) { 865 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 866 radeon_vram_limit); 867 radeon_vram_limit = 0; 868 } 869 870 /* gtt size must be power of two and greater or equal to 32M */ 871 if (radeon_gart_size < 32) { 872 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", 873 radeon_gart_size); 874 radeon_gart_size = 512; 875 876 } else if (!radeon_check_pot_argument(radeon_gart_size)) { 877 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 878 radeon_gart_size); 879 radeon_gart_size = 512; 880 } 881 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 882 883 /* AGP mode can only be -1, 1, 2, 4, 8 */ 884 switch (radeon_agpmode) { 885 case -1: 886 case 0: 887 case 1: 888 case 2: 889 case 4: 890 case 8: 891 break; 892 default: 893 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " 894 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); 895 radeon_agpmode = 0; 896 break; 897 } 898} 899 900/** 901 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is 902 * needed for waking up. 903 * 904 * @pdev: pci dev pointer 905 */ 906#ifdef DUMBBELL_WIP 907static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev) 908{ 909 910 /* 6600m in a macbook pro */ 911 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && 912 pdev->subsystem_device == 0x00e2) { 913 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n"); 914 return true; 915 } 916 917 return false; 918} 919#endif /* DUMBBELL_WIP */ 920 921/** 922 * radeon_switcheroo_set_state - set switcheroo state 923 * 924 * @pdev: pci dev pointer 925 * @state: vga switcheroo state 926 * 927 * Callback for the switcheroo driver. Suspends or resumes the 928 * the asics before or after it is powered up using ACPI methods. 
929 */ 930#ifdef DUMBBELL_WIP 931static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 932{ 933 struct drm_device *dev = pci_get_drvdata(pdev); 934 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 935 if (state == VGA_SWITCHEROO_ON) { 936 unsigned d3_delay = dev->pdev->d3_delay; 937 938 printk(KERN_INFO "radeon: switched on\n"); 939 /* don't suspend or resume card normally */ 940 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 941 942 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev)) 943 dev->pdev->d3_delay = 20; 944 945 radeon_resume_kms(dev); 946 947 dev->pdev->d3_delay = d3_delay; 948 949 dev->switch_power_state = DRM_SWITCH_POWER_ON; 950 drm_kms_helper_poll_enable(dev); 951 } else { 952 printk(KERN_INFO "radeon: switched off\n"); 953 drm_kms_helper_poll_disable(dev); 954 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 955 radeon_suspend_kms(dev, pmm); 956 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 957 } 958} 959#endif /* DUMBBELL_WIP */ 960 961/** 962 * radeon_switcheroo_can_switch - see if switcheroo state can change 963 * 964 * @pdev: pci dev pointer 965 * 966 * Callback for the switcheroo driver. Check of the switcheroo 967 * state can be changed. 968 * Returns true if the state can be changed, false if not. 
969 */ 970#ifdef DUMBBELL_WIP 971static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) 972{ 973 struct drm_device *dev = pci_get_drvdata(pdev); 974 bool can_switch; 975 976 spin_lock(&dev->count_lock); 977 can_switch = (dev->open_count == 0); 978 spin_unlock(&dev->count_lock); 979 return can_switch; 980} 981 982static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { 983 .set_gpu_state = radeon_switcheroo_set_state, 984 .reprobe = NULL, 985 .can_switch = radeon_switcheroo_can_switch, 986}; 987#endif /* DUMBBELL_WIP */ 988 989/** 990 * radeon_device_init - initialize the driver 991 * 992 * @rdev: radeon_device pointer 993 * @pdev: drm dev pointer 994 * @flags: driver flags 995 * 996 * Initializes the driver info and hw (all asics). 997 * Returns 0 for success or an error on failure. 998 * Called at driver startup. 999 */ 1000int radeon_device_init(struct radeon_device *rdev, 1001 struct drm_device *ddev, 1002 uint32_t flags) 1003{ 1004 int r, i; 1005 int dma_bits; 1006 1007 rdev->shutdown = false; 1008 rdev->dev = ddev->device; 1009 rdev->ddev = ddev; 1010 rdev->flags = flags; 1011 rdev->family = flags & RADEON_FAMILY_MASK; 1012 rdev->is_atom_bios = false; 1013 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 1014 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 1015 rdev->accel_working = false; 1016 rdev->fictitious_range_registered = false;
| 31 32#include <dev/drm2/drmP.h> 33#include <dev/drm2/drm_crtc_helper.h> 34#include <dev/drm2/radeon/radeon_drm.h> 35#include "radeon_reg.h" 36#include "radeon.h" 37#include "atom.h" 38 39static const char radeon_family_name[][16] = { 40 "R100", 41 "RV100", 42 "RS100", 43 "RV200", 44 "RS200", 45 "R200", 46 "RV250", 47 "RS300", 48 "RV280", 49 "R300", 50 "R350", 51 "RV350", 52 "RV380", 53 "R420", 54 "R423", 55 "RV410", 56 "RS400", 57 "RS480", 58 "RS600", 59 "RS690", 60 "RS740", 61 "RV515", 62 "R520", 63 "RV530", 64 "RV560", 65 "RV570", 66 "R580", 67 "R600", 68 "RV610", 69 "RV630", 70 "RV670", 71 "RV620", 72 "RV635", 73 "RS780", 74 "RS880", 75 "RV770", 76 "RV730", 77 "RV710", 78 "RV740", 79 "CEDAR", 80 "REDWOOD", 81 "JUNIPER", 82 "CYPRESS", 83 "HEMLOCK", 84 "PALM", 85 "SUMO", 86 "SUMO2", 87 "BARTS", 88 "TURKS", 89 "CAICOS", 90 "CAYMAN", 91 "ARUBA", 92 "TAHITI", 93 "PITCAIRN", 94 "VERDE", 95 "LAST", 96}; 97 98/** 99 * radeon_surface_init - Clear GPU surface registers. 100 * 101 * @rdev: radeon_device pointer 102 * 103 * Clear GPU surface registers (r1xx-r5xx). 104 */ 105void radeon_surface_init(struct radeon_device *rdev) 106{ 107 /* FIXME: check this out */ 108 if (rdev->family < CHIP_R600) { 109 int i; 110 111 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 112 if (rdev->surface_regs[i].bo) 113 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 114 else 115 radeon_clear_surface_reg(rdev, i); 116 } 117 /* enable surfaces */ 118 WREG32(RADEON_SURFACE_CNTL, 0); 119 } 120} 121 122/* 123 * GPU scratch registers helpers function. 124 */ 125/** 126 * radeon_scratch_init - Init scratch register driver information. 
127 * 128 * @rdev: radeon_device pointer 129 * 130 * Init CP scratch register driver information (r1xx-r5xx) 131 */ 132void radeon_scratch_init(struct radeon_device *rdev) 133{ 134 int i; 135 136 /* FIXME: check this out */ 137 if (rdev->family < CHIP_R300) { 138 rdev->scratch.num_reg = 5; 139 } else { 140 rdev->scratch.num_reg = 7; 141 } 142 rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 143 for (i = 0; i < rdev->scratch.num_reg; i++) { 144 rdev->scratch.free[i] = true; 145 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 146 } 147} 148 149/** 150 * radeon_scratch_get - Allocate a scratch register 151 * 152 * @rdev: radeon_device pointer 153 * @reg: scratch register mmio offset 154 * 155 * Allocate a CP scratch register for use by the driver (all asics). 156 * Returns 0 on success or -EINVAL on failure. 157 */ 158int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 159{ 160 int i; 161 162 for (i = 0; i < rdev->scratch.num_reg; i++) { 163 if (rdev->scratch.free[i]) { 164 rdev->scratch.free[i] = false; 165 *reg = rdev->scratch.reg[i]; 166 return 0; 167 } 168 } 169 return -EINVAL; 170} 171 172/** 173 * radeon_scratch_free - Free a scratch register 174 * 175 * @rdev: radeon_device pointer 176 * @reg: scratch register mmio offset 177 * 178 * Free a CP scratch register allocated for use by the driver (all asics) 179 */ 180void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 181{ 182 int i; 183 184 for (i = 0; i < rdev->scratch.num_reg; i++) { 185 if (rdev->scratch.reg[i] == reg) { 186 rdev->scratch.free[i] = true; 187 return; 188 } 189 } 190} 191 192/* 193 * radeon_wb_*() 194 * Writeback is the the method by which the the GPU updates special pages 195 * in memory with the status of certain GPU events (fences, ring pointers, 196 * etc.). 197 */ 198 199/** 200 * radeon_wb_disable - Disable Writeback 201 * 202 * @rdev: radeon_device pointer 203 * 204 * Disables Writeback (all asics). Used for suspend. 
205 */ 206void radeon_wb_disable(struct radeon_device *rdev) 207{ 208 int r; 209 210 if (rdev->wb.wb_obj) { 211 r = radeon_bo_reserve(rdev->wb.wb_obj, false); 212 if (unlikely(r != 0)) 213 return; 214 radeon_bo_kunmap(rdev->wb.wb_obj); 215 radeon_bo_unpin(rdev->wb.wb_obj); 216 radeon_bo_unreserve(rdev->wb.wb_obj); 217 } 218 rdev->wb.enabled = false; 219} 220 221/** 222 * radeon_wb_fini - Disable Writeback and free memory 223 * 224 * @rdev: radeon_device pointer 225 * 226 * Disables Writeback and frees the Writeback memory (all asics). 227 * Used at driver shutdown. 228 */ 229void radeon_wb_fini(struct radeon_device *rdev) 230{ 231 radeon_wb_disable(rdev); 232 if (rdev->wb.wb_obj) { 233 radeon_bo_unref(&rdev->wb.wb_obj); 234 rdev->wb.wb = NULL; 235 rdev->wb.wb_obj = NULL; 236 } 237} 238 239/** 240 * radeon_wb_init- Init Writeback driver info and allocate memory 241 * 242 * @rdev: radeon_device pointer 243 * 244 * Disables Writeback and frees the Writeback memory (all asics). 245 * Used at driver startup. 246 * Returns 0 on success or an -error on failure. 
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;
	void *wb_ptr;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	/* pin the bo in GTT so the GPU can DMA status words into it */
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	/* kmap stores the CPU mapping pointer into rdev->wb.wb */
	wb_ptr = &rdev->wb.wb;
	r = radeon_bo_kmap(rdev->wb.wb_obj, wb_ptr);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset(*(void **)wb_ptr, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in a case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* optional user cap on usable VRAM, in MB (0 = no limit) */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	/* VRAM must fit in the remaining 32-bit MC address space */
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the (already placed) GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute end after a possible resize above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %juM 0x%016jX - 0x%016jX (%juM used)\n",
			(uintmax_t)mc->mc_vram_size >> 20, (uintmax_t)mc->vram_start,
			(uintmax_t)mc->vram_end, (uintmax_t)mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* free space after VRAM (aligned up) and before VRAM (aligned down) */
	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	/* place GTT in whichever gap is larger, shrinking it if needed */
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %juM 0x%016jX - 0x%016jX\n",
			(uintmax_t)mc->gtt_size >> 20, (uintmax_t)mc->gtt_start, (uintmax_t)mc->gtt_end);
}

/*
 * GPU helpers function.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

#ifdef DUMBBELL_WIP
	/* Apple EFI systems do not post the card in the firmware */
	if (efi_enabled(EFI_BOOT) &&
	    rdev->dev->pci_subvendor == PCI_VENDOR_ID_APPLE)
		return false;
#endif /* DUMBBELL_WIP */

	/* first check CRTCs: any enabled CRTC implies the card was posted */
	if (ASIC_IS_DCE41(rdev)) {
		/* DCE4.1 (APUs) only has two CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	/* non-zero MEM_SIZE means the vbios programmed the MC */
	if (reg)
		return true;

	return false;

}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz; clocks come in units of 10 kHz, so divide by 100 */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	/* not posted: run the vbios init tables ourselves */
	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated; nothing to do */
	if (rdev->dummy_page.dmah)
		return 0;
	/* allocate from the low 4GB so legacy garts can address it */
	rdev->dummy_page.dmah = drm_pci_alloc(rdev->ddev,
	    PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXSIZE_32BIT);
	if (rdev->dummy_page.dmah == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = rdev->dummy_page.dmah->busaddr;
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.dmah == NULL)
		return;
	drm_pci_free(rdev->ddev, rdev->dummy_page.dmah);
	rdev->dummy_page.dmah = NULL;
	rdev->dummy_page.addr = 0;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords, as ATOM uses dword indices)
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM passes dword indices; WREG32 takes byte offsets */
	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords, as ATOM uses dword indices)
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	/* ATOM passes dword indices; RREG32 takes byte offsets */
	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords, as ATOM uses dword indices)
 * @val: value to write to the IO register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords, as ATOM uses dword indices)
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
729 */ 730int radeon_atombios_init(struct radeon_device *rdev) 731{ 732 struct card_info *atom_card_info = 733 malloc(sizeof(struct card_info), 734 DRM_MEM_DRIVER, M_ZERO | M_WAITOK); 735 736 if (!atom_card_info) 737 return -ENOMEM; 738 739 rdev->mode_info.atom_card_info = atom_card_info; 740 atom_card_info->dev = rdev->ddev; 741 atom_card_info->reg_read = cail_reg_read; 742 atom_card_info->reg_write = cail_reg_write; 743 /* needed for iio ops */ 744 if (rdev->rio_mem) { 745 atom_card_info->ioreg_read = cail_ioreg_read; 746 atom_card_info->ioreg_write = cail_ioreg_write; 747 } else { 748 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); 749 atom_card_info->ioreg_read = cail_reg_read; 750 atom_card_info->ioreg_write = cail_reg_write; 751 } 752 atom_card_info->mc_read = cail_mc_read; 753 atom_card_info->mc_write = cail_mc_write; 754 atom_card_info->pll_read = cail_pll_read; 755 atom_card_info->pll_write = cail_pll_write; 756 757 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 758 sx_init(&rdev->mode_info.atom_context->mutex, 759 "drm__radeon_device__mode_info__atom_context__mutex"); 760 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 761 atom_allocate_fb_scratch(rdev->mode_info.atom_context); 762 return 0; 763} 764 765/** 766 * radeon_atombios_fini - free the driver info and callbacks for atombios 767 * 768 * @rdev: radeon_device pointer 769 * 770 * Frees the driver info and register access callbacks for the ATOM 771 * interpreter (r4xx+). 772 * Called at driver shutdown. 773 */ 774void radeon_atombios_fini(struct radeon_device *rdev) 775{ 776 if (rdev->mode_info.atom_context) { 777 free(rdev->mode_info.atom_context->scratch, DRM_MEM_DRIVER); 778 atom_destroy(rdev->mode_info.atom_context); 779 } 780 free(rdev->mode_info.atom_card_info, DRM_MEM_DRIVER); 781} 782 783/* COMBIOS */ 784/* 785 * COMBIOS is the bios format prior to ATOM. It provides 786 * command tables similar to ATOM, but doesn't have a unified 787 * parser. 
 See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* intentionally empty: combios keeps no dynamic state */
}

#ifdef DUMBBELL_WIP
/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
#endif /* DUMBBELL_WIP */

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * NOTE: 0 also passes this check; callers treat 0 as "unset/disabled".
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

/**
 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
 * needed for waking up.
 *
 * @pdev: pci dev pointer
 */
#ifdef DUMBBELL_WIP
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{

	/* 6600m in a macbook pro */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    pdev->subsystem_device == 0x00e2) {
		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
		return true;
	}

	return false;
}
#endif /* DUMBBELL_WIP */

/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
#ifdef DUMBBELL_WIP
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some boards need a longer d3 delay to wake reliably */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		/* restore original delay after the quirked resume */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
#endif /* DUMBBELL_WIP */

/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
#ifdef DUMBBELL_WIP
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	/* only allow a GPU switch when no DRM clients hold the device open */
	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
#endif /* DUMBBELL_WIP */

/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		struct drm_device *ddev,
		uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = ddev->device;
	rdev->ddev = ddev;
	rdev->flags = flags;
	/* asic family is encoded in the low bits of the driver flags */
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->accel_working = false;
	rdev->fictitious_range_registered = false;
|
1017 /* set up ring ids */ 1018 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1019 rdev->ring[i].idx = i; 1020 } 1021 1022 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1023 radeon_family_name[rdev->family], ddev->pci_vendor, ddev->pci_device, 1024 ddev->pci_subvendor, ddev->pci_subdevice); 1025 1026 /* mutex initialization are all done here so we 1027 * can recall function without having locking issues */ 1028 sx_init(&rdev->ring_lock, "drm__radeon_device__ring_lock"); 1029 sx_init(&rdev->dc_hw_i2c_mutex, "drm__radeon_device__dc_hw_i2c_mutex"); 1030 atomic_set(&rdev->ih.lock, 0); 1031 sx_init(&rdev->gem.mutex, "drm__radeon_device__gem__mutex"); 1032 sx_init(&rdev->pm.mutex, "drm__radeon_device__pm__mutex"); 1033 sx_init(&rdev->gpu_clock_mutex, "drm__radeon_device__gpu_clock_mutex"); 1034 sx_init(&rdev->pm.mclk_lock, "drm__radeon_device__pm__mclk_lock"); 1035 sx_init(&rdev->exclusive_lock, "drm__radeon_device__exclusive_lock"); 1036 DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue); 1037 r = radeon_gem_init(rdev); 1038 if (r) 1039 return r; 1040 /* initialize vm here */ 1041 sx_init(&rdev->vm_manager.lock, "drm__radeon_device__vm_manager__lock"); 1042 /* Adjust VM size here. 1043 * Currently set to 4GB ((1 << 20) 4k pages). 1044 * Max GPUVM size for cayman and SI is 40 bits. 1045 */ 1046 rdev->vm_manager.max_pfn = 1 << 20; 1047 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); 1048 1049 /* Set asic functions */ 1050 r = radeon_asic_init(rdev); 1051 if (r) 1052 return r; 1053 radeon_check_arguments(rdev); 1054 1055 /* all of the newer IGP chips have an internal gart 1056 * However some rs4xx report as AGP, so remove that here. 1057 */ 1058 if ((rdev->family >= CHIP_RS400) && 1059 (rdev->flags & RADEON_IS_IGP)) { 1060 rdev->flags &= ~RADEON_IS_AGP; 1061 } 1062 1063 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 1064 radeon_agp_disable(rdev); 1065 } 1066 1067 /* set DMA mask + need_dma32 flags. 1068 * PCIE - can handle 40-bits. 
1069 * IGP - can handle 40-bits 1070 * AGP - generally dma32 is safest 1071 * PCI - dma32 for legacy pci gart, 40 bits on newer asics 1072 */ 1073 rdev->need_dma32 = false; 1074 if (rdev->flags & RADEON_IS_AGP) 1075 rdev->need_dma32 = true; 1076 if ((rdev->flags & RADEON_IS_PCI) && 1077 (rdev->family <= CHIP_RS740)) 1078 rdev->need_dma32 = true; 1079 1080 dma_bits = rdev->need_dma32 ? 32 : 40; 1081#ifdef DUMBBELL_WIP 1082 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1083 if (r) { 1084 rdev->need_dma32 = true; 1085 dma_bits = 32; 1086 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 1087 } 1088 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1089 if (r) { 1090 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 1091 printk(KERN_WARNING "radeon: No coherent DMA available.\n"); 1092 } 1093#endif /* DUMBBELL_WIP */ 1094 1095 /* Registers mapping */ 1096 /* TODO: block userspace mapping of io register */ 1097 DRM_SPININIT(&rdev->mmio_idx_lock, "drm__radeon_device__mmio_idx_lock"); 1098 rdev->rmmio_rid = PCIR_BAR(2); 1099 rdev->rmmio = bus_alloc_resource_any(rdev->dev, SYS_RES_MEMORY, 1100 &rdev->rmmio_rid, RF_ACTIVE | RF_SHAREABLE); 1101 if (rdev->rmmio == NULL) { 1102 return -ENOMEM; 1103 } 1104 rdev->rmmio_base = rman_get_start(rdev->rmmio); 1105 rdev->rmmio_size = rman_get_size(rdev->rmmio); 1106 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 1107 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 1108 1109 /* io port mapping */ 1110 for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) { 1111 uint32_t data; 1112 1113 data = pci_read_config(rdev->dev, PCIR_BAR(i), 4); 1114 if (PCI_BAR_IO(data)) { 1115 rdev->rio_rid = PCIR_BAR(i); 1116 rdev->rio_mem = bus_alloc_resource_any(rdev->dev, 1117 SYS_RES_IOPORT, &rdev->rio_rid, 1118 RF_ACTIVE | RF_SHAREABLE); 1119 break; 1120 } 1121 } 1122 if (rdev->rio_mem == NULL) 1123 DRM_ERROR("Unable to find PCI I/O BAR\n"); 1124 1125 rdev->tq = 
taskqueue_create("radeonkms", M_WAITOK, 1126 taskqueue_thread_enqueue, &rdev->tq); 1127 taskqueue_start_threads(&rdev->tq, 1, PWAIT, "radeon taskq"); 1128 1129#ifdef DUMBBELL_WIP 1130 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 1131 /* this will fail for cards that aren't VGA class devices, just 1132 * ignore it */ 1133 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1134 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops); 1135#endif /* DUMBBELL_WIP */ 1136 1137 r = radeon_init(rdev); 1138 if (r) 1139 return r; 1140 1141 r = radeon_ib_ring_tests(rdev); 1142 if (r) 1143 DRM_ERROR("ib ring test failed (%d).\n", r); 1144 1145 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1146 /* Acceleration not working on AGP card try again 1147 * with fallback to PCI or PCIE GART 1148 */ 1149 radeon_asic_reset(rdev); 1150 radeon_fini(rdev); 1151 radeon_agp_disable(rdev); 1152 r = radeon_init(rdev); 1153 if (r) 1154 return r; 1155 } 1156 1157 DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n", 1158 __func__, (uintmax_t)rdev->mc.aper_base, 1159 (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size); 1160 r = vm_phys_fictitious_reg_range( 1161 rdev->mc.aper_base, 1162 rdev->mc.aper_base + rdev->mc.visible_vram_size, 1163 VM_MEMATTR_WRITE_COMBINING); 1164 if (r != 0) { 1165 DRM_ERROR("Failed to register fictitious range " 1166 "0x%jx-0x%jx (%d).\n", (uintmax_t)rdev->mc.aper_base, 1167 (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size, r); 1168 return (-r); 1169 } 1170 rdev->fictitious_range_registered = true;
| 1018 /* set up ring ids */ 1019 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1020 rdev->ring[i].idx = i; 1021 } 1022 1023 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1024 radeon_family_name[rdev->family], ddev->pci_vendor, ddev->pci_device, 1025 ddev->pci_subvendor, ddev->pci_subdevice); 1026 1027 /* mutex initialization are all done here so we 1028 * can recall function without having locking issues */ 1029 sx_init(&rdev->ring_lock, "drm__radeon_device__ring_lock"); 1030 sx_init(&rdev->dc_hw_i2c_mutex, "drm__radeon_device__dc_hw_i2c_mutex"); 1031 atomic_set(&rdev->ih.lock, 0); 1032 sx_init(&rdev->gem.mutex, "drm__radeon_device__gem__mutex"); 1033 sx_init(&rdev->pm.mutex, "drm__radeon_device__pm__mutex"); 1034 sx_init(&rdev->gpu_clock_mutex, "drm__radeon_device__gpu_clock_mutex"); 1035 sx_init(&rdev->pm.mclk_lock, "drm__radeon_device__pm__mclk_lock"); 1036 sx_init(&rdev->exclusive_lock, "drm__radeon_device__exclusive_lock"); 1037 DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue); 1038 r = radeon_gem_init(rdev); 1039 if (r) 1040 return r; 1041 /* initialize vm here */ 1042 sx_init(&rdev->vm_manager.lock, "drm__radeon_device__vm_manager__lock"); 1043 /* Adjust VM size here. 1044 * Currently set to 4GB ((1 << 20) 4k pages). 1045 * Max GPUVM size for cayman and SI is 40 bits. 1046 */ 1047 rdev->vm_manager.max_pfn = 1 << 20; 1048 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); 1049 1050 /* Set asic functions */ 1051 r = radeon_asic_init(rdev); 1052 if (r) 1053 return r; 1054 radeon_check_arguments(rdev); 1055 1056 /* all of the newer IGP chips have an internal gart 1057 * However some rs4xx report as AGP, so remove that here. 1058 */ 1059 if ((rdev->family >= CHIP_RS400) && 1060 (rdev->flags & RADEON_IS_IGP)) { 1061 rdev->flags &= ~RADEON_IS_AGP; 1062 } 1063 1064 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 1065 radeon_agp_disable(rdev); 1066 } 1067 1068 /* set DMA mask + need_dma32 flags. 1069 * PCIE - can handle 40-bits. 
1070 * IGP - can handle 40-bits 1071 * AGP - generally dma32 is safest 1072 * PCI - dma32 for legacy pci gart, 40 bits on newer asics 1073 */ 1074 rdev->need_dma32 = false; 1075 if (rdev->flags & RADEON_IS_AGP) 1076 rdev->need_dma32 = true; 1077 if ((rdev->flags & RADEON_IS_PCI) && 1078 (rdev->family <= CHIP_RS740)) 1079 rdev->need_dma32 = true; 1080 1081 dma_bits = rdev->need_dma32 ? 32 : 40; 1082#ifdef DUMBBELL_WIP 1083 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1084 if (r) { 1085 rdev->need_dma32 = true; 1086 dma_bits = 32; 1087 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 1088 } 1089 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1090 if (r) { 1091 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 1092 printk(KERN_WARNING "radeon: No coherent DMA available.\n"); 1093 } 1094#endif /* DUMBBELL_WIP */ 1095 1096 /* Registers mapping */ 1097 /* TODO: block userspace mapping of io register */ 1098 DRM_SPININIT(&rdev->mmio_idx_lock, "drm__radeon_device__mmio_idx_lock"); 1099 rdev->rmmio_rid = PCIR_BAR(2); 1100 rdev->rmmio = bus_alloc_resource_any(rdev->dev, SYS_RES_MEMORY, 1101 &rdev->rmmio_rid, RF_ACTIVE | RF_SHAREABLE); 1102 if (rdev->rmmio == NULL) { 1103 return -ENOMEM; 1104 } 1105 rdev->rmmio_base = rman_get_start(rdev->rmmio); 1106 rdev->rmmio_size = rman_get_size(rdev->rmmio); 1107 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 1108 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 1109 1110 /* io port mapping */ 1111 for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) { 1112 uint32_t data; 1113 1114 data = pci_read_config(rdev->dev, PCIR_BAR(i), 4); 1115 if (PCI_BAR_IO(data)) { 1116 rdev->rio_rid = PCIR_BAR(i); 1117 rdev->rio_mem = bus_alloc_resource_any(rdev->dev, 1118 SYS_RES_IOPORT, &rdev->rio_rid, 1119 RF_ACTIVE | RF_SHAREABLE); 1120 break; 1121 } 1122 } 1123 if (rdev->rio_mem == NULL) 1124 DRM_ERROR("Unable to find PCI I/O BAR\n"); 1125 1126 rdev->tq = 
taskqueue_create("radeonkms", M_WAITOK, 1127 taskqueue_thread_enqueue, &rdev->tq); 1128 taskqueue_start_threads(&rdev->tq, 1, PWAIT, "radeon taskq"); 1129 1130#ifdef DUMBBELL_WIP 1131 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 1132 /* this will fail for cards that aren't VGA class devices, just 1133 * ignore it */ 1134 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1135 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops); 1136#endif /* DUMBBELL_WIP */ 1137 1138 r = radeon_init(rdev); 1139 if (r) 1140 return r; 1141 1142 r = radeon_ib_ring_tests(rdev); 1143 if (r) 1144 DRM_ERROR("ib ring test failed (%d).\n", r); 1145 1146 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1147 /* Acceleration not working on AGP card try again 1148 * with fallback to PCI or PCIE GART 1149 */ 1150 radeon_asic_reset(rdev); 1151 radeon_fini(rdev); 1152 radeon_agp_disable(rdev); 1153 r = radeon_init(rdev); 1154 if (r) 1155 return r; 1156 } 1157 1158 DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n", 1159 __func__, (uintmax_t)rdev->mc.aper_base, 1160 (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size); 1161 r = vm_phys_fictitious_reg_range( 1162 rdev->mc.aper_base, 1163 rdev->mc.aper_base + rdev->mc.visible_vram_size, 1164 VM_MEMATTR_WRITE_COMBINING); 1165 if (r != 0) { 1166 DRM_ERROR("Failed to register fictitious range " 1167 "0x%jx-0x%jx (%d).\n", (uintmax_t)rdev->mc.aper_base, 1168 (uintmax_t)rdev->mc.aper_base + rdev->mc.visible_vram_size, r); 1169 return (-r); 1170 } 1171 rdev->fictitious_range_registered = true;
|
1208 1209 radeon_fini(rdev); 1210#ifdef DUMBBELL_WIP 1211 vga_switcheroo_unregister_client(rdev->pdev); 1212 vga_client_register(rdev->pdev, NULL, NULL, NULL); 1213#endif /* DUMBBELL_WIP */ 1214 1215 if (rdev->tq != NULL) { 1216 taskqueue_free(rdev->tq); 1217 rdev->tq = NULL; 1218 } 1219 1220 if (rdev->rio_mem) 1221 bus_release_resource(rdev->dev, SYS_RES_IOPORT, rdev->rio_rid, 1222 rdev->rio_mem); 1223 rdev->rio_mem = NULL; 1224 bus_release_resource(rdev->dev, SYS_RES_MEMORY, rdev->rmmio_rid, 1225 rdev->rmmio); 1226 rdev->rmmio = NULL; 1227#ifdef DUMBBELL_WIP 1228 radeon_debugfs_remove_files(rdev); 1229#endif /* DUMBBELL_WIP */ 1230} 1231 1232 1233/* 1234 * Suspend & resume. 1235 */ 1236/** 1237 * radeon_suspend_kms - initiate device suspend 1238 * 1239 * @pdev: drm dev pointer 1240 * @state: suspend state 1241 * 1242 * Puts the hw in the suspend state (all asics). 1243 * Returns 0 for success or an error on failure. 1244 * Called at driver suspend. 1245 */ 1246int radeon_suspend_kms(struct drm_device *dev) 1247{ 1248 struct radeon_device *rdev; 1249 struct drm_crtc *crtc; 1250 struct drm_connector *connector; 1251 int i, r; 1252 bool force_completion = false; 1253 1254 if (dev == NULL || dev->dev_private == NULL) { 1255 return -ENODEV; 1256 } 1257#ifdef DUMBBELL_WIP 1258 if (state.event == PM_EVENT_PRETHAW) { 1259 return 0; 1260 } 1261#endif /* DUMBBELL_WIP */ 1262 rdev = dev->dev_private; 1263 1264 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1265 return 0; 1266 1267 drm_kms_helper_poll_disable(dev); 1268 1269 /* turn off display hw */ 1270 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1271 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1272 } 1273 1274 /* unpin the front buffers */ 1275 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1276 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 1277 struct radeon_bo *robj; 1278 1279 if (rfb == NULL || rfb->obj == NULL) { 1280 continue; 1281 } 
1282 robj = gem_to_radeon_bo(rfb->obj); 1283 /* don't unpin kernel fb objects */ 1284 if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 1285 r = radeon_bo_reserve(robj, false); 1286 if (r == 0) { 1287 radeon_bo_unpin(robj); 1288 radeon_bo_unreserve(robj); 1289 } 1290 } 1291 } 1292 /* evict vram memory */ 1293 radeon_bo_evict_vram(rdev); 1294 1295 sx_xlock(&rdev->ring_lock); 1296 /* wait for gpu to finish processing current batch */ 1297 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1298 r = radeon_fence_wait_empty_locked(rdev, i); 1299 if (r) { 1300 /* delay GPU reset to resume */ 1301 force_completion = true; 1302 } 1303 } 1304 if (force_completion) { 1305 radeon_fence_driver_force_completion(rdev); 1306 } 1307 sx_xunlock(&rdev->ring_lock); 1308 1309 radeon_save_bios_scratch_regs(rdev); 1310 1311 radeon_pm_suspend(rdev); 1312 radeon_suspend(rdev); 1313 radeon_hpd_fini(rdev); 1314 /* evict remaining vram memory */ 1315 radeon_bo_evict_vram(rdev); 1316 1317 radeon_agp_suspend(rdev); 1318 1319 pci_save_state(device_get_parent(rdev->dev)); 1320#ifdef DUMBBELL_WIP 1321 if (state.event == PM_EVENT_SUSPEND) { 1322 /* Shut down the device */ 1323 pci_disable_device(dev->pdev); 1324#endif /* DUMBBELL_WIP */ 1325 pci_set_powerstate(dev->device, PCI_POWERSTATE_D3); 1326#ifdef DUMBBELL_WIP 1327 } 1328 console_lock(); 1329#endif /* DUMBBELL_WIP */ 1330 radeon_fbdev_set_suspend(rdev, 1); 1331#ifdef DUMBBELL_WIP 1332 console_unlock(); 1333#endif /* DUMBBELL_WIP */ 1334 return 0; 1335} 1336 1337/** 1338 * radeon_resume_kms - initiate device resume 1339 * 1340 * @pdev: drm dev pointer 1341 * 1342 * Bring the hw back to operating state (all asics). 1343 * Returns 0 for success or an error on failure. 1344 * Called at driver resume. 
1345 */ 1346int radeon_resume_kms(struct drm_device *dev) 1347{ 1348 struct drm_connector *connector; 1349 struct radeon_device *rdev = dev->dev_private; 1350 int r; 1351 1352 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1353 return 0; 1354 1355#ifdef DUMBBELL_WIP 1356 console_lock(); 1357#endif /* DUMBBELL_WIP */ 1358 pci_set_powerstate(dev->device, PCI_POWERSTATE_D0); 1359 pci_restore_state(device_get_parent(rdev->dev)); 1360#ifdef DUMBBELL_WIP 1361 if (pci_enable_device(dev->pdev)) { 1362 console_unlock(); 1363 return -1; 1364 } 1365#endif /* DUMBBELL_WIP */ 1366 /* resume AGP if in use */ 1367 radeon_agp_resume(rdev); 1368 radeon_resume(rdev); 1369 1370 r = radeon_ib_ring_tests(rdev); 1371 if (r) 1372 DRM_ERROR("ib ring test failed (%d).\n", r); 1373 1374 radeon_pm_resume(rdev); 1375 radeon_restore_bios_scratch_regs(rdev); 1376 1377 radeon_fbdev_set_suspend(rdev, 0); 1378#ifdef DUMBBELL_WIP 1379 console_unlock(); 1380#endif /* DUMBBELL_WIP */ 1381 1382 /* init dig PHYs, disp eng pll */ 1383 if (rdev->is_atom_bios) { 1384 radeon_atom_encoder_init(rdev); 1385 radeon_atom_disp_eng_pll_init(rdev); 1386 /* turn on the BL */ 1387 if (rdev->mode_info.bl_encoder) { 1388 u8 bl_level = radeon_get_backlight_level(rdev, 1389 rdev->mode_info.bl_encoder); 1390 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 1391 bl_level); 1392 } 1393 } 1394 /* reset hpd state */ 1395 radeon_hpd_init(rdev); 1396 /* blat the mode back in */ 1397 drm_helper_resume_force_mode(dev); 1398 /* turn on display hw */ 1399 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1400 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1401 } 1402 1403 drm_kms_helper_poll_enable(dev); 1404 return 0; 1405} 1406 1407/** 1408 * radeon_gpu_reset - reset the asic 1409 * 1410 * @rdev: radeon device pointer 1411 * 1412 * Attempt the reset the GPU if it has hung (all asics). 1413 * Returns 0 for success or an error on failure. 
1414 */ 1415int radeon_gpu_reset(struct radeon_device *rdev) 1416{ 1417 unsigned ring_sizes[RADEON_NUM_RINGS]; 1418 uint32_t *ring_data[RADEON_NUM_RINGS]; 1419 1420 bool saved = false; 1421 1422 int i, r; 1423 int resched; 1424 1425 sx_xlock(&rdev->exclusive_lock); 1426 radeon_save_bios_scratch_regs(rdev); 1427 /* block TTM */ 1428 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1429 radeon_suspend(rdev); 1430 1431 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1432 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 1433 &ring_data[i]); 1434 if (ring_sizes[i]) { 1435 saved = true; 1436 dev_info(rdev->dev, "Saved %d dwords of commands " 1437 "on ring %d.\n", ring_sizes[i], i); 1438 } 1439 } 1440 1441retry: 1442 r = radeon_asic_reset(rdev); 1443 if (!r) { 1444 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); 1445 radeon_resume(rdev); 1446 } 1447 1448 radeon_restore_bios_scratch_regs(rdev); 1449 1450 if (!r) { 1451 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1452 radeon_ring_restore(rdev, &rdev->ring[i], 1453 ring_sizes[i], ring_data[i]); 1454 ring_sizes[i] = 0; 1455 ring_data[i] = NULL; 1456 } 1457 1458 r = radeon_ib_ring_tests(rdev); 1459 if (r) { 1460 dev_err(rdev->dev, "ib ring test failed (%d).\n", r); 1461 if (saved) { 1462 saved = false; 1463 radeon_suspend(rdev); 1464 goto retry; 1465 } 1466 } 1467 } else { 1468 radeon_fence_driver_force_completion(rdev); 1469 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1470 free(ring_data[i], DRM_MEM_DRIVER); 1471 } 1472 } 1473 1474 drm_helper_resume_force_mode(rdev->ddev); 1475 1476 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1477 if (r) { 1478 /* bad news, how to tell it to userspace ? 
*/ 1479 dev_info(rdev->dev, "GPU reset failed\n"); 1480 } 1481 1482 sx_xunlock(&rdev->exclusive_lock); 1483 return r; 1484} 1485 1486 1487/* 1488 * Debugfs 1489 */ 1490#ifdef DUMBBELL_WIP 1491int radeon_debugfs_add_files(struct radeon_device *rdev, 1492 struct drm_info_list *files, 1493 unsigned nfiles) 1494{ 1495 unsigned i; 1496 1497 for (i = 0; i < rdev->debugfs_count; i++) { 1498 if (rdev->debugfs[i].files == files) { 1499 /* Already registered */ 1500 return 0; 1501 } 1502 } 1503 1504 i = rdev->debugfs_count + 1; 1505 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1506 DRM_ERROR("Reached maximum number of debugfs components.\n"); 1507 DRM_ERROR("Report so we increase " 1508 "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1509 return -EINVAL; 1510 } 1511 rdev->debugfs[rdev->debugfs_count].files = files; 1512 rdev->debugfs[rdev->debugfs_count].num_files = nfiles; 1513 rdev->debugfs_count = i; 1514#if defined(CONFIG_DEBUG_FS) 1515 drm_debugfs_create_files(files, nfiles, 1516 rdev->ddev->control->debugfs_root, 1517 rdev->ddev->control); 1518 drm_debugfs_create_files(files, nfiles, 1519 rdev->ddev->primary->debugfs_root, 1520 rdev->ddev->primary); 1521#endif 1522 return 0; 1523} 1524 1525static void radeon_debugfs_remove_files(struct radeon_device *rdev) 1526{ 1527#if defined(CONFIG_DEBUG_FS) 1528 unsigned i; 1529 1530 for (i = 0; i < rdev->debugfs_count; i++) { 1531 drm_debugfs_remove_files(rdev->debugfs[i].files, 1532 rdev->debugfs[i].num_files, 1533 rdev->ddev->control); 1534 drm_debugfs_remove_files(rdev->debugfs[i].files, 1535 rdev->debugfs[i].num_files, 1536 rdev->ddev->primary); 1537 } 1538#endif 1539} 1540 1541#if defined(CONFIG_DEBUG_FS) 1542int radeon_debugfs_init(struct drm_minor *minor) 1543{ 1544 return 0; 1545} 1546 1547void radeon_debugfs_cleanup(struct drm_minor *minor) 1548{ 1549} 1550#endif /* DUMBBELL_WIP */ 1551#endif
| 1234 1235 radeon_fini(rdev); 1236#ifdef DUMBBELL_WIP 1237 vga_switcheroo_unregister_client(rdev->pdev); 1238 vga_client_register(rdev->pdev, NULL, NULL, NULL); 1239#endif /* DUMBBELL_WIP */ 1240 1241 if (rdev->tq != NULL) { 1242 taskqueue_free(rdev->tq); 1243 rdev->tq = NULL; 1244 } 1245 1246 if (rdev->rio_mem) 1247 bus_release_resource(rdev->dev, SYS_RES_IOPORT, rdev->rio_rid, 1248 rdev->rio_mem); 1249 rdev->rio_mem = NULL; 1250 bus_release_resource(rdev->dev, SYS_RES_MEMORY, rdev->rmmio_rid, 1251 rdev->rmmio); 1252 rdev->rmmio = NULL; 1253#ifdef DUMBBELL_WIP 1254 radeon_debugfs_remove_files(rdev); 1255#endif /* DUMBBELL_WIP */ 1256} 1257 1258 1259/* 1260 * Suspend & resume. 1261 */ 1262/** 1263 * radeon_suspend_kms - initiate device suspend 1264 * 1265 * @pdev: drm dev pointer 1266 * @state: suspend state 1267 * 1268 * Puts the hw in the suspend state (all asics). 1269 * Returns 0 for success or an error on failure. 1270 * Called at driver suspend. 1271 */ 1272int radeon_suspend_kms(struct drm_device *dev) 1273{ 1274 struct radeon_device *rdev; 1275 struct drm_crtc *crtc; 1276 struct drm_connector *connector; 1277 int i, r; 1278 bool force_completion = false; 1279 1280 if (dev == NULL || dev->dev_private == NULL) { 1281 return -ENODEV; 1282 } 1283#ifdef DUMBBELL_WIP 1284 if (state.event == PM_EVENT_PRETHAW) { 1285 return 0; 1286 } 1287#endif /* DUMBBELL_WIP */ 1288 rdev = dev->dev_private; 1289 1290 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1291 return 0; 1292 1293 drm_kms_helper_poll_disable(dev); 1294 1295 /* turn off display hw */ 1296 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1297 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1298 } 1299 1300 /* unpin the front buffers */ 1301 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1302 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 1303 struct radeon_bo *robj; 1304 1305 if (rfb == NULL || rfb->obj == NULL) { 1306 continue; 1307 
} 1308 robj = gem_to_radeon_bo(rfb->obj); 1309 /* don't unpin kernel fb objects */ 1310 if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 1311 r = radeon_bo_reserve(robj, false); 1312 if (r == 0) { 1313 radeon_bo_unpin(robj); 1314 radeon_bo_unreserve(robj); 1315 } 1316 } 1317 } 1318 /* evict vram memory */ 1319 radeon_bo_evict_vram(rdev); 1320 1321 sx_xlock(&rdev->ring_lock); 1322 /* wait for gpu to finish processing current batch */ 1323 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1324 r = radeon_fence_wait_empty_locked(rdev, i); 1325 if (r) { 1326 /* delay GPU reset to resume */ 1327 force_completion = true; 1328 } 1329 } 1330 if (force_completion) { 1331 radeon_fence_driver_force_completion(rdev); 1332 } 1333 sx_xunlock(&rdev->ring_lock); 1334 1335 radeon_save_bios_scratch_regs(rdev); 1336 1337 radeon_pm_suspend(rdev); 1338 radeon_suspend(rdev); 1339 radeon_hpd_fini(rdev); 1340 /* evict remaining vram memory */ 1341 radeon_bo_evict_vram(rdev); 1342 1343 radeon_agp_suspend(rdev); 1344 1345 pci_save_state(device_get_parent(rdev->dev)); 1346#ifdef DUMBBELL_WIP 1347 if (state.event == PM_EVENT_SUSPEND) { 1348 /* Shut down the device */ 1349 pci_disable_device(dev->pdev); 1350#endif /* DUMBBELL_WIP */ 1351 pci_set_powerstate(dev->device, PCI_POWERSTATE_D3); 1352#ifdef DUMBBELL_WIP 1353 } 1354 console_lock(); 1355#endif /* DUMBBELL_WIP */ 1356 radeon_fbdev_set_suspend(rdev, 1); 1357#ifdef DUMBBELL_WIP 1358 console_unlock(); 1359#endif /* DUMBBELL_WIP */ 1360 return 0; 1361} 1362 1363/** 1364 * radeon_resume_kms - initiate device resume 1365 * 1366 * @pdev: drm dev pointer 1367 * 1368 * Bring the hw back to operating state (all asics). 1369 * Returns 0 for success or an error on failure. 1370 * Called at driver resume. 
1371 */ 1372int radeon_resume_kms(struct drm_device *dev) 1373{ 1374 struct drm_connector *connector; 1375 struct radeon_device *rdev = dev->dev_private; 1376 int r; 1377 1378 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1379 return 0; 1380 1381#ifdef DUMBBELL_WIP 1382 console_lock(); 1383#endif /* DUMBBELL_WIP */ 1384 pci_set_powerstate(dev->device, PCI_POWERSTATE_D0); 1385 pci_restore_state(device_get_parent(rdev->dev)); 1386#ifdef DUMBBELL_WIP 1387 if (pci_enable_device(dev->pdev)) { 1388 console_unlock(); 1389 return -1; 1390 } 1391#endif /* DUMBBELL_WIP */ 1392 /* resume AGP if in use */ 1393 radeon_agp_resume(rdev); 1394 radeon_resume(rdev); 1395 1396 r = radeon_ib_ring_tests(rdev); 1397 if (r) 1398 DRM_ERROR("ib ring test failed (%d).\n", r); 1399 1400 radeon_pm_resume(rdev); 1401 radeon_restore_bios_scratch_regs(rdev); 1402 1403 radeon_fbdev_set_suspend(rdev, 0); 1404#ifdef DUMBBELL_WIP 1405 console_unlock(); 1406#endif /* DUMBBELL_WIP */ 1407 1408 /* init dig PHYs, disp eng pll */ 1409 if (rdev->is_atom_bios) { 1410 radeon_atom_encoder_init(rdev); 1411 radeon_atom_disp_eng_pll_init(rdev); 1412 /* turn on the BL */ 1413 if (rdev->mode_info.bl_encoder) { 1414 u8 bl_level = radeon_get_backlight_level(rdev, 1415 rdev->mode_info.bl_encoder); 1416 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 1417 bl_level); 1418 } 1419 } 1420 /* reset hpd state */ 1421 radeon_hpd_init(rdev); 1422 /* blat the mode back in */ 1423 drm_helper_resume_force_mode(dev); 1424 /* turn on display hw */ 1425 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1426 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1427 } 1428 1429 drm_kms_helper_poll_enable(dev); 1430 return 0; 1431} 1432 1433/** 1434 * radeon_gpu_reset - reset the asic 1435 * 1436 * @rdev: radeon device pointer 1437 * 1438 * Attempt the reset the GPU if it has hung (all asics). 1439 * Returns 0 for success or an error on failure. 
1440 */ 1441int radeon_gpu_reset(struct radeon_device *rdev) 1442{ 1443 unsigned ring_sizes[RADEON_NUM_RINGS]; 1444 uint32_t *ring_data[RADEON_NUM_RINGS]; 1445 1446 bool saved = false; 1447 1448 int i, r; 1449 int resched; 1450 1451 sx_xlock(&rdev->exclusive_lock); 1452 radeon_save_bios_scratch_regs(rdev); 1453 /* block TTM */ 1454 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1455 radeon_suspend(rdev); 1456 1457 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1458 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 1459 &ring_data[i]); 1460 if (ring_sizes[i]) { 1461 saved = true; 1462 dev_info(rdev->dev, "Saved %d dwords of commands " 1463 "on ring %d.\n", ring_sizes[i], i); 1464 } 1465 } 1466 1467retry: 1468 r = radeon_asic_reset(rdev); 1469 if (!r) { 1470 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); 1471 radeon_resume(rdev); 1472 } 1473 1474 radeon_restore_bios_scratch_regs(rdev); 1475 1476 if (!r) { 1477 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1478 radeon_ring_restore(rdev, &rdev->ring[i], 1479 ring_sizes[i], ring_data[i]); 1480 ring_sizes[i] = 0; 1481 ring_data[i] = NULL; 1482 } 1483 1484 r = radeon_ib_ring_tests(rdev); 1485 if (r) { 1486 dev_err(rdev->dev, "ib ring test failed (%d).\n", r); 1487 if (saved) { 1488 saved = false; 1489 radeon_suspend(rdev); 1490 goto retry; 1491 } 1492 } 1493 } else { 1494 radeon_fence_driver_force_completion(rdev); 1495 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1496 free(ring_data[i], DRM_MEM_DRIVER); 1497 } 1498 } 1499 1500 drm_helper_resume_force_mode(rdev->ddev); 1501 1502 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1503 if (r) { 1504 /* bad news, how to tell it to userspace ? 
*/ 1505 dev_info(rdev->dev, "GPU reset failed\n"); 1506 } 1507 1508 sx_xunlock(&rdev->exclusive_lock); 1509 return r; 1510} 1511 1512 1513/* 1514 * Debugfs 1515 */ 1516#ifdef DUMBBELL_WIP 1517int radeon_debugfs_add_files(struct radeon_device *rdev, 1518 struct drm_info_list *files, 1519 unsigned nfiles) 1520{ 1521 unsigned i; 1522 1523 for (i = 0; i < rdev->debugfs_count; i++) { 1524 if (rdev->debugfs[i].files == files) { 1525 /* Already registered */ 1526 return 0; 1527 } 1528 } 1529 1530 i = rdev->debugfs_count + 1; 1531 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1532 DRM_ERROR("Reached maximum number of debugfs components.\n"); 1533 DRM_ERROR("Report so we increase " 1534 "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1535 return -EINVAL; 1536 } 1537 rdev->debugfs[rdev->debugfs_count].files = files; 1538 rdev->debugfs[rdev->debugfs_count].num_files = nfiles; 1539 rdev->debugfs_count = i; 1540#if defined(CONFIG_DEBUG_FS) 1541 drm_debugfs_create_files(files, nfiles, 1542 rdev->ddev->control->debugfs_root, 1543 rdev->ddev->control); 1544 drm_debugfs_create_files(files, nfiles, 1545 rdev->ddev->primary->debugfs_root, 1546 rdev->ddev->primary); 1547#endif 1548 return 0; 1549} 1550 1551static void radeon_debugfs_remove_files(struct radeon_device *rdev) 1552{ 1553#if defined(CONFIG_DEBUG_FS) 1554 unsigned i; 1555 1556 for (i = 0; i < rdev->debugfs_count; i++) { 1557 drm_debugfs_remove_files(rdev->debugfs[i].files, 1558 rdev->debugfs[i].num_files, 1559 rdev->ddev->control); 1560 drm_debugfs_remove_files(rdev->debugfs[i].files, 1561 rdev->debugfs[i].num_files, 1562 rdev->ddev->primary); 1563 } 1564#endif 1565} 1566 1567#if defined(CONFIG_DEBUG_FS) 1568int radeon_debugfs_init(struct drm_minor *minor) 1569{ 1570 return 0; 1571} 1572 1573void radeon_debugfs_cleanup(struct drm_minor *minor) 1574{ 1575} 1576#endif /* DUMBBELL_WIP */ 1577#endif
|