/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file mga_dma.c
 * DMA support for MGA G200 / G400.
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Jeff Hartmann <jhartmann@valinux.com>
 * \author Keith Whitwell <keith@tungstengraphics.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "mga_drm.h"
#include "mga_drv.h"

#define MGA_DEFAULT_USEC_TIMEOUT	10000
#define MGA_FREELIST_DEBUG		0

#define MINIMAL_CLEANUP	0
#define FULL_CLEANUP	1
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);

/* ================================================================
 * Engine control
 */

/**
 * Busy-wait (with 1us delays) until the drawing engine and primary DMA
 * stream report idle, or until dev_priv->usec_timeout iterations elapse.
 *
 * \return 0 on idle, -EBUSY on timeout.
 */
int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
	u32 status = 0;
	int i;
	DRM_DEBUG("\n");

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
		if (status == MGA_ENDPRDMASTS) {
			/* NOTE(review): a dummy CRTC index write is issued once
			 * idle is seen -- presumably a hardware flush quirk;
			 * confirm against the G400 documentation.
			 */
			MGA_WRITE8(MGA_CRTC_INDEX, 0);
			return 0;
		}
		DRM_UDELAY(1);
	}

#if MGA_DMA_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/**
 * Reset the software-side bookkeeping of the primary DMA stream (tail,
 * free space, last flush point and SAREA wrap counter).  Does not touch
 * any hardware registers.
 */
static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;

	DRM_DEBUG("\n");

	/* The primary DMA stream should look like new right about now.
	 */
	primary->tail = 0;
	primary->space = primary->size;
	primary->last_flush = 0;

	sarea_priv->last_wrap = 0;



	return 0;
}

/* ================================================================
 * Primary DMA stream
 */

/**
 * Kick off execution of the primary DMA stream up to the current tail.
 *
 * Waits for the engine to go idle first, pads the stream with a block of
 * DMAPAD commands, then writes the new end address to MGA_PRIMEND.  Bails
 * out early if nothing has been queued since the last flush.
 */
void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	u32 head, tail;
	u32 status = 0;
	int i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	/* We need to wait so that we can do a safe flush */
	for (i = 0; i < dev_priv->usec_timeout; i++) {
		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
		if (status == MGA_ENDPRDMASTS)
			break;
		DRM_UDELAY(1);
	}

	if (primary->tail == primary->last_flush) {
		DRM_DEBUG(" bailing out...\n");
		return;
	}

	/* tail is a card-space (bus) address: buffer offset plus map base. */
	tail = primary->tail + dev_priv->primary->offset;

	/* We need to pad the stream between flushes, as the card
	 * actually (partially?) reads the first of these commands.
	 * See page 4-16 in the G400 manual, middle of the page or so.
	 */
	BEGIN_DMA(1);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();

	primary->last_flush = primary->tail;

	head = MGA_READ(MGA_PRIMADDRESS);

	/* Recompute free space: when the read pointer is at or behind the
	 * tail, free space runs from the tail to the end of the buffer;
	 * otherwise it is the gap between tail and head.
	 */
	if (head <= tail)
		primary->space = primary->size - primary->tail;
	else
		primary->space = head - tail;

	DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
	DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
	DRM_DEBUG(" space = 0x%06x\n", primary->space);

	mga_flush_write_combine();
	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

	DRM_DEBUG("done.\n");
}

/**
 * Begin a wrap of the primary DMA stream: flush everything queued so far
 * (padded, as in mga_do_dma_flush), reset the software tail to the start
 * of the buffer, bump the wrap count and mark the stream as wrapped.
 * mga_do_dma_wrap_end() completes the wrap on the hardware side.
 */
void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	u32 head, tail;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_DMA_WRAP();

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();

	tail = primary->tail + dev_priv->primary->offset;

	primary->tail = 0;
	primary->last_flush = 0;
	primary->last_wrap++;

	head = MGA_READ(MGA_PRIMADDRESS);

	/* After the wrap, free space extends from the buffer start up to
	 * wherever the card is still reading.
	 */
	if (head == dev_priv->primary->offset)
		primary->space = primary->size;
	else
		primary->space = head - dev_priv->primary->offset;

	DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
	DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
	DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
	DRM_DEBUG(" space = 0x%06x\n", primary->space);

	mga_flush_write_combine();
	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

	set_bit(0, &primary->wrapped);
	DRM_DEBUG("done.\n");
}

/**
 * Complete a primary DMA stream wrap: point the hardware read address back
 * at the start of the buffer, publish the new wrap count in the SAREA and
 * clear the wrapped flag set by mga_do_dma_wrap_start().
 */
void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 head = dev_priv->primary->offset;
	DRM_DEBUG("\n");

	sarea_priv->last_wrap++;
	DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap);

	mga_flush_write_combine();
	MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);

	clear_bit(0, &primary->wrapped);
	DRM_DEBUG("done.\n");
}

/* ================================================================
 * Freelist management
 */

/* Sentinel ages: a buffer whose age.head is MGA_BUFFER_USED is in flight;
 * MGA_BUFFER_FREE marks it reusable.
 */
#define MGA_BUFFER_USED		(~0)
#define MGA_BUFFER_FREE		0

#if MGA_FREELIST_DEBUG
/* Dump the current dispatch position and every freelist entry (debug only). */
static void mga_freelist_print(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;

	DRM_INFO("\n");
	DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
		 dev_priv->sarea_priv->last_dispatch,
		 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
				dev_priv->primary->offset));
	DRM_INFO("current freelist:\n");

	for (entry = dev_priv->head->next; entry; entry = entry->next) {
		DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
			 entry, entry->buf->idx, entry->age.head,
			 (unsigned long)(entry->age.head - dev_priv->primary->offset));
	}
	DRM_INFO("\n");
}
#endif

/**
 * Build the buffer freelist: a doubly-linked list with a dummy head node
 * (aged MGA_BUFFER_USED) followed by one entry per DMA buffer, each aged
 * MGA_BUFFER_FREE.  dev_priv->tail tracks the last entry.
 *
 * \return 0 on success, -ENOMEM on allocation failure.
 */
static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_freelist_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
	if (dev_priv->head == NULL)
		return -ENOMEM;

	SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;

		entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
		if (entry == NULL)
			return -ENOMEM;

		/* Insert the new entry right after the dummy head. */
		entry->next = dev_priv->head->next;
		entry->prev = dev_priv->head;
		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
		entry->buf = buf;

		if (dev_priv->head->next != NULL)
			dev_priv->head->next->prev = entry;
		if (entry->next == NULL)
			dev_priv->tail = entry;

		buf_priv->list_entry = entry;
		buf_priv->discard = 0;
		buf_priv->dispatched = 0;

		dev_priv->head->next = entry;
	}

	return 0;
}

/* Free every freelist node, including the dummy head, and clear the list. */
static void mga_freelist_cleanup(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;
	drm_mga_freelist_t *next;
	DRM_DEBUG("\n");

	entry = dev_priv->head;
	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}

	dev_priv->head = dev_priv->tail = NULL;
}


/**
 * Take the oldest buffer (the tail entry) off the freelist if the hardware
 * has aged past it (TEST_AGE against the current read head and wrap count),
 * mark it in-use, and return it.
 *
 * \return the buffer, or NULL if no buffer is old enough to reuse yet.
 */
static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *next;
	drm_mga_freelist_t *prev;
	drm_mga_freelist_t *tail = dev_priv->tail;
	u32 head, wrap;
	DRM_DEBUG("\n");

	head = MGA_READ(MGA_PRIMADDRESS);
	wrap = dev_priv->sarea_priv->last_wrap;

	DRM_DEBUG(" tail=0x%06lx %d\n",
		  tail->age.head ?
		  (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
		  tail->age.wrap);
	DRM_DEBUG(" head=0x%06lx %d\n",
		  (unsigned long)(head - dev_priv->primary->offset), wrap);

	if (TEST_AGE(&tail->age, head, wrap)) {
		/* Unlink the tail entry and hand its buffer out. */
		prev = dev_priv->tail->prev;
		next = dev_priv->tail;
		prev->next = NULL;
		next->prev = next->next = NULL;
		dev_priv->tail = prev;
		SET_AGE(&next->age, MGA_BUFFER_USED, 0);
		return next->buf;
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}

/**
 * Return a buffer to the freelist.  A still-in-use buffer (age head ==
 * MGA_BUFFER_USED) is marked free and appended at the tail; an already-aged
 * buffer is inserted right after the dummy head for quick reuse.
 */
int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_freelist_t *head, *entry, *prev;

	DRM_DEBUG("age=0x%06lx wrap=%d\n",
		  (unsigned long)(buf_priv->list_entry->age.head -
				  dev_priv->primary->offset),
		  buf_priv->list_entry->age.wrap);

	entry = buf_priv->list_entry;
	head = dev_priv->head;

	if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
		prev = dev_priv->tail;
		prev->next = entry;
		entry->prev = prev;
		entry->next = NULL;
	} else {
		prev = head->next;
		head->next = entry;
		prev->prev = entry;
		entry->prev = head;
		entry->next = prev;
	}

	return 0;
}

/* ================================================================
 * DMA initialization, cleanup
 */

/**
 * Driver load callback: allocate the per-device private structure, record
 * the chipset type (passed via \p flags), locate the MMIO PCI BAR, register
 * the driver's statistics counters and initialize vblank support.
 *
 * \return 0 on success, negative errno on failure.
 */
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_mga_private_t *dev_priv;
	int ret;

	dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
	dev_priv->chipset = flags;

	/* MMIO registers live in PCI BAR 1 on these cards. */
	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);

	dev->counters += 3;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;

	ret = drm_vblank_init(dev, 1);

	if (ret) {
		(void) mga_driver_unload(dev);
		return ret;
	}

	return 0;
}

#if __OS_HAS_AGP
/**
 * Bootstrap the driver for AGP DMA.
 *
 * \todo
 * Investigate whether there is any benifit to storing the WARP microcode in
 * AGP memory.  If not, the microcode may as well always be put in PCI
 * memory.
 *
 * \todo
 * This routine needs to set dma_bs->agp_mode to the mode actually configured
 * in the hardware.  Looking just at the Linux AGP driver code, I don't see
 * an easy way to determine this.
 *
 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
 */
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;
	unsigned int warp_size = MGA_WARP_UCODE_SIZE;
	int err;
	unsigned offset;
	const unsigned secondary_size = dma_bs->secondary_bin_count
	    * dma_bs->secondary_bin_size;
	const unsigned agp_size = (dma_bs->agp_size << 20);
	struct drm_buf_desc req;
	struct drm_agp_mode mode;
	struct drm_agp_info info;
	struct drm_agp_buffer agp_req;
	struct drm_agp_binding bind_req;

	/* Acquire AGP. */
	err = drm_agp_acquire(dev);
	if (err) {
		DRM_ERROR("Unable to acquire AGP: %d\n", err);
		return err;
	}

	err = drm_agp_info(dev, &info);
	if (err) {
		DRM_ERROR("Unable to get AGP info: %d\n", err);
		return err;
	}

	/* Keep the bridge's mode bits but substitute the requested speed. */
	mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
	err = drm_agp_enable(dev, mode);
	if (err) {
		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
		return err;
	}

	/* In addition to the usual AGP mode configuration, the G200 AGP cards
	 * need to have the AGP mode "manually" set.
	 */

	if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
		if (mode.mode & 0x02)
			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
		else
			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
	}

	/* Allocate and bind AGP memory. */
	agp_req.size = agp_size;
	agp_req.type = 0;
	err = drm_agp_alloc(dev, &agp_req);
	if (err) {
		dev_priv->agp_size = 0;
		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
			  dma_bs->agp_size);
		return err;
	}

	dev_priv->agp_size = agp_size;
	dev_priv->agp_handle = agp_req.handle;

	bind_req.handle = agp_req.handle;
	bind_req.offset = 0;
	err = drm_agp_bind(dev, &bind_req);
	if (err) {
		DRM_ERROR("Unable to bind AGP memory: %d\n", err);
		return err;
	}

	/* Make drm_addbufs happy by not trying to create a mapping for less
	 * than a page.
	 */
	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	/* Carve the AGP aperture into: WARP ucode, primary DMA buffer,
	 * secondary DMA buffers, then the remainder for textures.
	 */
	offset = 0;
	err = drm_addmap(dev, offset, warp_size,
			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
	if (err) {
		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
		return err;
	}

	offset += warp_size;
	err = drm_addmap(dev, offset, dma_bs->primary_size,
			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
	if (err) {
		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
		return err;
	}

	offset += dma_bs->primary_size;
	err = drm_addmap(dev, offset, secondary_size,
			 _DRM_AGP, 0, &dev->agp_buffer_map);
	if (err) {
		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
		return err;
	}

	(void)memset(&req, 0, sizeof(req));
	req.count = dma_bs->secondary_bin_count;
	req.size = dma_bs->secondary_bin_size;
	req.flags = _DRM_AGP_BUFFER;
	req.agp_start = offset;

	err = drm_addbufs_agp(dev, &req);
	if (err) {
		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
		return err;
	}

	/* Look up the user-space token for the secondary buffer map so that
	 * clients can mmap it.
	 */
	{
		struct drm_map_list *_entry;
		unsigned long agp_token = 0;

		list_for_each_entry(_entry, &dev->maplist, head) {
			if (_entry->map == dev->agp_buffer_map)
				agp_token = _entry->user_token;
		}
		if (!agp_token)
			return -EFAULT;

		dev->agp_buffer_token = agp_token;
	}

	offset += secondary_size;
	err = drm_addmap(dev, offset, agp_size - offset,
			 _DRM_AGP, 0, &dev_priv->agp_textures);
	if (err) {
		DRM_ERROR("Unable to map AGP texture region %d\n", err);
		return err;
	}

	drm_core_ioremap(dev_priv->warp, dev);
	drm_core_ioremap(dev_priv->primary, dev);
	drm_core_ioremap(dev->agp_buffer_map, dev);

	if (!dev_priv->warp->handle ||
	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
			  dev_priv->warp->handle, dev_priv->primary->handle,
			  dev->agp_buffer_map->handle);
		return -ENOMEM;
	}

	dev_priv->dma_access = MGA_PAGPXFER;
	dev_priv->wagp_enable = MGA_WAGP_ENABLE;

	DRM_INFO("Initialized card for AGP DMA.\n");
	return 0;
}
#else
/* Stub used when the OS has no AGP support compiled in. */
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	return -EINVAL;
}
#endif

/**
 * Bootstrap the driver for PCI DMA.
 *
 * \todo
 * The algorithm for decreasing the size of the primary DMA buffer could be
 * better.  The size should be rounded up to the nearest page size, then
 * decrease the request size by a single page each pass through the loop.
 *
 * \todo
 * Determine whether the maximum address passed to drm_pci_alloc is correct.
 * The same goes for drm_addbufs_pci.
 *
 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
 */
static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;
	unsigned int warp_size = MGA_WARP_UCODE_SIZE;
	unsigned int primary_size;
	unsigned int bin_count;
	int err;
	struct drm_buf_desc req;

	if (dev->dma == NULL) {
		DRM_ERROR("dev->dma is NULL\n");
		return -EFAULT;
	}

	/* Make drm_addbufs happy by not trying to create a mapping for less
	 * than a page.
	 */
	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	/* The proper alignment is 0x100 for this mapping */
	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
			 _DRM_READ_ONLY, &dev_priv->warp);
	if (err != 0) {
		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
			  err);
		return err;
	}

	/* Other than the bottom two bits being used to encode other
	 * information, there don't appear to be any restrictions on the
	 * alignment of the primary or secondary DMA buffers.
	 */

	/* Halve the requested primary buffer size until an allocation
	 * succeeds (consistent memory can be scarce).
	 */
	for (primary_size = dma_bs->primary_size; primary_size != 0;
	     primary_size >>= 1) {
		/* The proper alignment for this mapping is 0x04 */
		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, &dev_priv->primary);
		if (!err)
			break;
	}

	if (err != 0) {
		DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
		return -ENOMEM;
	}

	if (dev_priv->primary->size != dma_bs->primary_size) {
		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
			 dma_bs->primary_size,
			 (unsigned)dev_priv->primary->size);
		dma_bs->primary_size = dev_priv->primary->size;
	}

	/* Likewise, back off on the number of secondary bins until the
	 * buffers can be added.
	 */
	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
	     bin_count--) {
		(void)memset(&req, 0, sizeof(req));
		req.count = bin_count;
		req.size = dma_bs->secondary_bin_size;

		err = drm_addbufs_pci(dev, &req);
		if (!err)
			break;
	}

	if (bin_count == 0) {
		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
		return err;
	}

	if (bin_count != dma_bs->secondary_bin_count) {
		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);

		dma_bs->secondary_bin_count = bin_count;
	}

	dev_priv->dma_access = 0;
	dev_priv->wagp_enable = 0;

	dma_bs->agp_mode = 0;

	DRM_INFO("Initialized card for PCI DMA.\n");
	return 0;
}

/**
 * Common bootstrap entry: map MMIO and the status page, then hand off to
 * the AGP or PCI specific bootstrap.  An AGP card whose AGP bootstrap fails
 * is cleaned up and retried as plain PCI DMA.
 */
static int mga_do_dma_bootstrap(struct drm_device *dev,
				drm_mga_dma_bootstrap_t *dma_bs)
{
	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
	int err;
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;

	dev_priv->used_new_dma_init = 1;

	/* The first steps are the same for both PCI and AGP based DMA.  Map
	 * the cards MMIO registers and map a status page.
	 */
	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
	if (err) {
		DRM_ERROR("Unable to map MMIO region: %d\n", err);
		return err;
	}

	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
			 &dev_priv->status);
	if (err) {
		DRM_ERROR("Unable to map status region: %d\n", err);
		return err;
	}

	/* The DMA initialization procedure is slightly different for PCI and
	 * AGP cards.  AGP cards just allocate a large block of AGP memory and
	 * carve off portions of it for internal uses.  The remaining memory
	 * is returned to user-mode to be used for AGP textures.
	 */
	/* err is 0 here from the successful addmap above when !is_agp. */
	if (is_agp)
		err = mga_do_agp_dma_bootstrap(dev, dma_bs);

	/* If we attempted to initialize the card for AGP DMA but failed,
	 * clean-up any mess that may have been created.
	 */

	if (err)
		mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);

	/* Not only do we want to try and initialize PCI cards for PCI DMA,
	 * but we also try to initialize AGP cards that could not be
	 * initialized for AGP DMA.  This covers the case where we have an AGP
	 * card in a system with an unsupported AGP chipset.  In that case the
	 * card will be detected as AGP, but we won't be able to allocate any
	 * AGP memory, etc.
	 */

	if (!is_agp || err)
		err = mga_do_pci_dma_bootstrap(dev, dma_bs);

	return err;
}

/**
 * DRM_IOCTL_MGA_DMA_BOOTSTRAP handler: run the bootstrap and report the
 * texture region and the actually-achieved AGP mode back to user space.
 */
int mga_dma_bootstrap(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	drm_mga_dma_bootstrap_t *bootstrap = data;
	int err;
	/* Map requested AGP mode bits onto the supported speeds (1x/2x/4x). */
	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
	const drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *) dev->dev_private;

	err = mga_do_dma_bootstrap(dev, bootstrap);
	if (err) {
		mga_do_cleanup_dma(dev, FULL_CLEANUP);
		return err;
	}

	if (dev_priv->agp_textures != NULL) {
		bootstrap->texture_handle = dev_priv->agp_textures->offset;
		bootstrap->texture_size = dev_priv->agp_textures->size;
	} else {
		bootstrap->texture_handle = 0;
		bootstrap->texture_size = 0;
	}

	bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];

	return err;
}

/**
 * Second-stage DMA init from the MGA_INIT_DMA ioctl: copy framebuffer /
 * depth / texture layout from user space, resolve all maps (legacy path
 * only -- the new bootstrap path already created them), install and start
 * the WARP microcode, program the primary DMA registers and build the
 * buffer freelist.
 *
 * \return 0 on success, negative errno on failure (caller cleans up).
 */
static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
	drm_mga_private_t *dev_priv;
	int ret;
	DRM_DEBUG("\n");

	dev_priv = dev->dev_private;

	if (init->sgram)
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	else
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
	dev_priv->maccess = init->maccess;

	dev_priv->fb_cpp = init->fb_cpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	dev_priv->depth_cpp = init->depth_cpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset[0];
	dev_priv->texture_size = init->texture_size[0];

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("failed to find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->used_new_dma_init) {
		/* Legacy (pre-bootstrap-ioctl) init: user space created the
		 * maps; look each one up by its offset.
		 */
		dev_priv->dma_access = MGA_PAGPXFER;
		dev_priv->wagp_enable = MGA_WAGP_ENABLE;

		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("failed to find status page!\n");
			return -EINVAL;
		}
		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
		if (!dev_priv->mmio) {
			DRM_ERROR("failed to find mmio region!\n");
			return -EINVAL;
		}
		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
		if (!dev_priv->warp) {
			DRM_ERROR("failed to find warp microcode region!\n");
			return -EINVAL;
		}
		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
		if (!dev_priv->primary) {
			DRM_ERROR("failed to find primary dma region!\n");
			return -EINVAL;
		}
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map =
		    drm_core_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to find dma buffer region!\n");
			return -EINVAL;
		}

		drm_core_ioremap(dev_priv->warp, dev);
		drm_core_ioremap(dev_priv->primary, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
	}

	dev_priv->sarea_priv =
	    (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				 init->sarea_priv_offset);

	if (!dev_priv->warp->handle ||
	    !dev_priv->primary->handle ||
	    ((dev_priv->dma_access != 0) &&
	     ((dev->agp_buffer_map == NULL) ||
	      (dev->agp_buffer_map->handle == NULL)))) {
		DRM_ERROR("failed to ioremap agp regions!\n");
		return -ENOMEM;
	}

	ret = mga_warp_install_microcode(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
		return ret;
	}

	ret = mga_warp_init(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to init WARP engine!: %d\n", ret);
		return ret;
	}

	dev_priv->prim.status = (u32 *) dev_priv->status->handle;

	mga_do_wait_for_idle(dev_priv);

	/* Init the primary DMA registers.
	 */
	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);

	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
			      + dev_priv->primary->size);
	dev_priv->prim.size = dev_priv->primary->size;

	dev_priv->prim.tail = 0;
	dev_priv->prim.space = dev_priv->prim.size;
	dev_priv->prim.wrapped = 0;

	dev_priv->prim.last_flush = 0;
	dev_priv->prim.last_wrap = 0;

	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

	dev_priv->prim.status[0] = dev_priv->primary->offset;
	dev_priv->prim.status[1] = 0;

	dev_priv->sarea_priv->last_wrap = 0;
	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if (mga_freelist_init(dev, dev_priv) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * Tear down DMA state.  \p full_cleanup (FULL_CLEANUP) additionally drops
 * the MMIO/status maps and the new-init flag; MINIMAL_CLEANUP leaves them
 * so a failed AGP bootstrap can fall back to PCI.
 */
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
	int err = 0;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		/* _DRM_CONSISTENT maps were never ioremapped, so only unmap
		 * the others.
		 */
		if ((dev_priv->warp != NULL)
		    && (dev_priv->warp->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->warp, dev);

		if ((dev_priv->primary != NULL)
		    && (dev_priv->primary->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->primary, dev);

		if (dev->agp_buffer_map != NULL)
			drm_core_ioremapfree(dev->agp_buffer_map, dev);

		if (dev_priv->used_new_dma_init) {
#if __OS_HAS_AGP
			if (dev_priv->agp_handle != 0) {
				struct drm_agp_binding unbind_req;
				struct drm_agp_buffer free_req;

				unbind_req.handle = dev_priv->agp_handle;
				drm_agp_unbind(dev, &unbind_req);

				free_req.handle = dev_priv->agp_handle;
				drm_agp_free(dev, &free_req);

				dev_priv->agp_textures = NULL;
				dev_priv->agp_size = 0;
				dev_priv->agp_handle = 0;
			}

			if ((dev->agp != NULL) && dev->agp->acquired)
				err = drm_agp_release(dev);
#endif
		}

		dev_priv->warp = NULL;
		dev_priv->primary = NULL;
		dev_priv->sarea = NULL;
		dev_priv->sarea_priv = NULL;
		dev->agp_buffer_map = NULL;

		if (full_cleanup) {
			dev_priv->mmio = NULL;
			dev_priv->status = NULL;
			dev_priv->used_new_dma_init = 0;
		}

		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
		dev_priv->warp_pipe = 0;
		memset(dev_priv->warp_pipe_phys, 0,
		       sizeof(dev_priv->warp_pipe_phys));

		if (dev_priv->head != NULL)
			mga_freelist_cleanup(dev);
	}

	return err;
}

/**
 * DRM_IOCTL_MGA_INIT handler: dispatch to init or cleanup based on
 * init->func, cleaning up fully if init fails.
 */
int mga_dma_init(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	drm_mga_init_t *init = data;
	int err;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case MGA_INIT_DMA:
		err = mga_do_init_dma(dev, init);
		if (err)
			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
		return err;
	case MGA_CLEANUP_DMA:
		return mga_do_cleanup_dma(dev, FULL_CLEANUP);
	}

	return -EINVAL;
}

/* ================================================================
 * Primary DMA stream management
 */

/**
 * DRM_IOCTL_MGA_FLUSH handler: wait out any pending wrap, flush the primary
 * stream if requested, and optionally wait for full engine idle
 * (_DRM_LOCK_QUIESCENT).
 */
int mga_dma_flush(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	struct drm_lock *lock = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("%s%s%s\n",
		  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
		  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
		  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");

	WRAP_WAIT_WITH_RETURN(dev_priv);

	if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
		mga_do_dma_flush(dev_priv);

	if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
		int ret = mga_do_wait_for_idle(dev_priv);
		if (ret < 0)
			DRM_INFO("-EBUSY\n");
		return ret;
#else
		return mga_do_wait_for_idle(dev_priv);
#endif
	} else {
		return 0;
	}
}

/* DRM_IOCTL_MGA_RESET handler: reset primary-stream software state. */
int mga_dma_reset(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mga_do_dma_reset(dev_priv);
}

/* ================================================================
 * DMA buffer management
 */

/**
 * Hand out up to d->request_count free buffers, copying each buffer's index
 * and size to the user-supplied arrays.
 *
 * \return 0 on success, -EAGAIN when the freelist runs dry, -EFAULT on a
 * failed copy to user space.
 */
static int mga_dma_get_buffers(struct drm_device *dev,
			       struct drm_file *file_priv, struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = mga_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i],
				     &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i],
				     &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

/**
 * DRM_IOCTL_DMA handler: validate the request (this driver only grants
 * buffers, never accepts them), wait out any pending wrap, then grant
 * buffers via mga_dma_get_buffers().
 */
int mga_dma_buffers(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	WRAP_TEST_WITH_RETURN(dev_priv);

	d->granted_count = 0;

	if (d->request_count)
		ret = mga_dma_get_buffers(dev, file_priv, d);

	return ret;
}

/**
 * Called just before the module is unloaded.
 */
int mga_driver_unload(struct drm_device *dev)
{
	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}

/**
 * Called when the last opener of the device is closed.
 */
void mga_driver_lastclose(struct drm_device *dev)
{
	mga_do_cleanup_dma(dev, FULL_CLEANUP);
}

/* DRM quiescence callback: wait for the engine to go fully idle. */
int mga_driver_dma_quiescent(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	return mga_do_wait_for_idle(dev_priv);
}