/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
27 * 28 * Authors: 29 * Keith Whitwell <keith@tungstengraphics.com> 30 */ 31 32 33#include "drmP.h" 34#include "drm.h" 35#include "drm_sarea.h" 36#include "nouveau_drv.h" 37 38static struct mem_block * 39split_block(struct mem_block *p, uint64_t start, uint64_t size, 40 struct drm_file *file_priv) 41{ 42 /* Maybe cut off the start of an existing block */ 43 if (start > p->start) { 44 struct mem_block *newblock = 45 drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); 46 if (!newblock) 47 goto out; 48 newblock->start = start; 49 newblock->size = p->size - (start - p->start); 50 newblock->file_priv = NULL; 51 newblock->next = p->next; 52 newblock->prev = p; 53 p->next->prev = newblock; 54 p->next = newblock; 55 p->size -= newblock->size; 56 p = newblock; 57 } 58 59 /* Maybe cut off the end of an existing block */ 60 if (size < p->size) { 61 struct mem_block *newblock = 62 drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); 63 if (!newblock) 64 goto out; 65 newblock->start = start + size; 66 newblock->size = p->size - size; 67 newblock->file_priv = NULL; 68 newblock->next = p->next; 69 newblock->prev = p; 70 p->next->prev = newblock; 71 p->next = newblock; 72 p->size = size; 73 } 74 75out: 76 /* Our block is in the middle */ 77 p->file_priv = file_priv; 78 return p; 79} 80 81struct mem_block * 82nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, 83 int align2, struct drm_file *file_priv, int tail) 84{ 85 struct mem_block *p; 86 uint64_t mask = (1 << align2) - 1; 87 88 if (!heap) 89 return NULL; 90 91 if (tail) { 92 list_for_each_prev(p, heap) { 93 uint64_t start = ((p->start + p->size) - size) & ~mask; 94 95 if (p->file_priv == 0 && start >= p->start && 96 start + size <= p->start + p->size) 97 return split_block(p, start, size, file_priv); 98 } 99 } else { 100 list_for_each(p, heap) { 101 uint64_t start = (p->start + mask) & ~mask; 102 103 if (p->file_priv == 0 && 104 start + size <= p->start + p->size) 105 return split_block(p, start, size, file_priv); 106 } 107 } 108 
109 return NULL; 110} 111 112static struct mem_block *find_block(struct mem_block *heap, uint64_t start) 113{ 114 struct mem_block *p; 115 116 list_for_each(p, heap) 117 if (p->start == start) 118 return p; 119 120 return NULL; 121} 122 123void nouveau_mem_free_block(struct mem_block *p) 124{ 125 p->file_priv = NULL; 126 127 /* Assumes a single contiguous range. Needs a special file_priv in 128 * 'heap' to stop it being subsumed. 129 */ 130 if (p->next->file_priv == 0) { 131 struct mem_block *q = p->next; 132 p->size += q->size; 133 p->next = q->next; 134 p->next->prev = p; 135 drm_free(q, sizeof(*q), DRM_MEM_BUFS); 136 } 137 138 if (p->prev->file_priv == 0) { 139 struct mem_block *q = p->prev; 140 q->size += p->size; 141 q->next = p->next; 142 q->next->prev = q; 143 drm_free(p, sizeof(*q), DRM_MEM_BUFS); 144 } 145} 146 147/* Initialize. How to check for an uninitialized heap? 148 */ 149int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, 150 uint64_t size) 151{ 152 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); 153 154 if (!blocks) 155 return -ENOMEM; 156 157 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); 158 if (!*heap) { 159 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); 160 return -ENOMEM; 161 } 162 163 blocks->start = start; 164 blocks->size = size; 165 blocks->file_priv = NULL; 166 blocks->next = blocks->prev = *heap; 167 168 memset(*heap, 0, sizeof(**heap)); 169 (*heap)->file_priv = (struct drm_file *) - 1; 170 (*heap)->next = (*heap)->prev = blocks; 171 return 0; 172} 173 174/* 175 * Free all blocks associated with the releasing file_priv 176 */ 177void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) 178{ 179 struct mem_block *p; 180 181 if (!heap || !heap->next) 182 return; 183 184 list_for_each(p, heap) { 185 if (p->file_priv == file_priv) 186 p->file_priv = NULL; 187 } 188 189 /* Assumes a single contiguous range. Needs a special file_priv in 190 * 'heap' to stop it being subsumed. 
191 */ 192 list_for_each(p, heap) { 193 while ((p->file_priv == 0) && (p->next->file_priv == 0) && 194 (p->next!=heap)) { 195 struct mem_block *q = p->next; 196 p->size += q->size; 197 p->next = q->next; 198 p->next->prev = p; 199 drm_free(q, sizeof(*q), DRM_MEM_DRIVER); 200 } 201 } 202} 203 204/* 205 * Cleanup everything 206 */ 207void nouveau_mem_takedown(struct mem_block **heap) 208{ 209 struct mem_block *p; 210 211 if (!*heap) 212 return; 213 214 for (p = (*heap)->next; p != *heap;) { 215 struct mem_block *q = p; 216 p = p->next; 217 drm_free(q, sizeof(*q), DRM_MEM_DRIVER); 218 } 219 220 drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); 221 *heap = NULL; 222} 223 224void nouveau_mem_close(struct drm_device *dev) 225{ 226 struct drm_nouveau_private *dev_priv = dev->dev_private; 227 228 nouveau_mem_takedown(&dev_priv->agp_heap); 229 nouveau_mem_takedown(&dev_priv->fb_heap); 230 if (dev_priv->pci_heap) 231 nouveau_mem_takedown(&dev_priv->pci_heap); 232} 233 234/*XXX BSD needs compat functions for pci access 235 * #define DRM_PCI_DEV struct device 236 * #define drm_pci_get_bsf pci_get_bsf 237 * and a small inline to do *val = pci_read_config(pdev->device, where, 4); 238 * might work 239 */ 240static int nforce_pci_fn_read_config_dword(int devfn, int where, uint32_t *val) 241{ 242#ifdef __linux__ 243 DRM_PCI_DEV *pdev; 244 245 if (!(pdev = drm_pci_get_bsf(0, 0, devfn))) { 246 DRM_ERROR("nForce PCI device function 0x%02x not found\n", 247 devfn); 248 return -ENODEV; 249 } 250 251 return drm_pci_read_config_dword(pdev, where, val); 252#else 253 DRM_ERROR("BSD compat for checking IGP memory amount needed\n"); 254 return 0; 255#endif 256} 257 258static void nouveau_mem_check_nforce_dimms(struct drm_device *dev) 259{ 260 uint32_t mem_ctrlr_pciid; 261 262 nforce_pci_fn_read_config_dword(3, 0x00, &mem_ctrlr_pciid); 263 mem_ctrlr_pciid >>= 16; 264 265 if (mem_ctrlr_pciid == 0x01a9 || mem_ctrlr_pciid == 0x01ab || 266 mem_ctrlr_pciid == 0x01ed) { 267 uint32_t dimm[3]; 268 int 
i; 269 270 for (i = 0; i < 3; i++) { 271 nforce_pci_fn_read_config_dword(2, 0x40 + i * 4, &dimm[i]); 272 dimm[i] = (dimm[i] >> 8) & 0x4f; 273 } 274 275 if (dimm[0] + dimm[1] != dimm[2]) 276 DRM_INFO("Your nForce DIMMs are not arranged in " 277 "optimal banks!\n"); 278 } 279} 280 281static uint32_t 282nouveau_mem_fb_amount_igp(struct drm_device *dev) 283{ 284 struct drm_nouveau_private *dev_priv = dev->dev_private; 285 uint32_t mem = 0; 286 287 if (dev_priv->flags & NV_NFORCE) { 288 nforce_pci_fn_read_config_dword(1, 0x7C, &mem); 289 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; 290 } 291 if (dev_priv->flags & NV_NFORCE2) { 292 nforce_pci_fn_read_config_dword(1, 0x84, &mem); 293 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; 294 } 295 296 DRM_ERROR("impossible!\n"); 297 298 return 0; 299} 300 301/* returns the amount of FB ram in bytes */ 302uint64_t nouveau_mem_fb_amount(struct drm_device *dev) 303{ 304 struct drm_nouveau_private *dev_priv=dev->dev_private; 305 switch(dev_priv->card_type) 306 { 307 case NV_04: 308 case NV_05: 309 if (NV_READ(NV03_BOOT_0) & 0x00000100) { 310 return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; 311 } else 312 switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) 313 { 314 case NV04_BOOT_0_RAM_AMOUNT_32MB: 315 return 32*1024*1024; 316 case NV04_BOOT_0_RAM_AMOUNT_16MB: 317 return 16*1024*1024; 318 case NV04_BOOT_0_RAM_AMOUNT_8MB: 319 return 8*1024*1024; 320 case NV04_BOOT_0_RAM_AMOUNT_4MB: 321 return 4*1024*1024; 322 } 323 break; 324 case NV_10: 325 case NV_11: 326 case NV_17: 327 case NV_20: 328 case NV_30: 329 case NV_40: 330 case NV_44: 331 case NV_50: 332 default: 333 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { 334 return nouveau_mem_fb_amount_igp(dev); 335 } else { 336 uint64_t mem; 337 338 mem = (NV_READ(NV10_PFB_CSTATUS) & 339 NV10_PFB_CSTATUS_RAM_AMOUNT_MB_MASK) >> 340 NV10_PFB_CSTATUS_RAM_AMOUNT_MB_SHIFT; 341 return mem*1024*1024; 342 } 343 break; 344 } 345 346 DRM_ERROR("Unable to detect video ram 
size. Please report your setup to " DRIVER_EMAIL "\n"); 347 return 0; 348} 349 350static void nouveau_mem_reset_agp(struct drm_device *dev) 351{ 352 struct drm_nouveau_private *dev_priv = dev->dev_private; 353 uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; 354 355 saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1); 356 saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19); 357 358 /* clear busmaster bit */ 359 NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); 360 /* clear SBA and AGP bits */ 361 NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); 362 363 /* power cycle pgraph, if enabled */ 364 pmc_enable = NV_READ(NV03_PMC_ENABLE); 365 if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { 366 NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH); 367 NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | 368 NV_PMC_ENABLE_PGRAPH); 369 } 370 371 /* and restore (gives effect of resetting AGP) */ 372 NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19); 373 NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1); 374} 375 376static int 377nouveau_mem_init_agp(struct drm_device *dev, int ttm) 378{ 379 struct drm_nouveau_private *dev_priv = dev->dev_private; 380 struct drm_agp_info info; 381 struct drm_agp_mode mode; 382 int ret; 383 384 nouveau_mem_reset_agp(dev); 385 386 ret = drm_agp_acquire(dev); 387 if (ret) { 388 DRM_ERROR("Unable to acquire AGP: %d\n", ret); 389 return ret; 390 } 391 392 ret = drm_agp_info(dev, &info); 393 if (ret) { 394 DRM_ERROR("Unable to get AGP info: %d\n", ret); 395 return ret; 396 } 397 398 /* see agp.h for the AGPSTAT_* modes available */ 399 mode.mode = info.mode; 400 ret = drm_agp_enable(dev, mode); 401 if (ret) { 402 DRM_ERROR("Unable to enable AGP: %d\n", ret); 403 return ret; 404 } 405 406 if (!ttm) { 407 struct drm_agp_buffer agp_req; 408 struct drm_agp_binding bind_req; 409 410 agp_req.size = info.aperture_size; 411 agp_req.type = 0; 412 ret = drm_agp_alloc(dev, &agp_req); 413 if (ret) { 414 DRM_ERROR("Unable to alloc AGP: %d\n", ret); 415 return 
ret; 416 } 417 418 bind_req.handle = agp_req.handle; 419 bind_req.offset = 0; 420 ret = drm_agp_bind(dev, &bind_req); 421 if (ret) { 422 DRM_ERROR("Unable to bind AGP: %d\n", ret); 423 return ret; 424 } 425 } 426 427 dev_priv->gart_info.type = NOUVEAU_GART_AGP; 428 dev_priv->gart_info.aper_base = info.aperture_base; 429 dev_priv->gart_info.aper_size = info.aperture_size; 430 return 0; 431} 432 433#define HACK_OLD_MM 434int 435nouveau_mem_init_ttm(struct drm_device *dev) 436{ 437 struct drm_nouveau_private *dev_priv = dev->dev_private; 438 uint32_t vram_size, bar1_size; 439 int ret; 440 441 dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; 442 dev_priv->fb_phys = drm_get_resource_start(dev,1); 443 dev_priv->gart_info.type = NOUVEAU_GART_NONE; 444 445 drm_bo_driver_init(dev); 446 447 /* non-mappable vram */ 448 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); 449 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 450 vram_size = dev_priv->fb_available_size >> PAGE_SHIFT; 451 bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT; 452 if (bar1_size < vram_size) { 453 if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, 454 bar1_size, vram_size - bar1_size, 1))) { 455 DRM_ERROR("Failed PRIV0 mm init: %d\n", ret); 456 return ret; 457 } 458 vram_size = bar1_size; 459 } 460 461 /* mappable vram */ 462#ifdef HACK_OLD_MM 463 vram_size /= 4; 464#endif 465 if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) { 466 DRM_ERROR("Failed VRAM mm init: %d\n", ret); 467 return ret; 468 } 469 470 /* GART */ 471#if !defined(__powerpc__) && !defined(__ia64__) 472 if (drm_device_is_agp(dev) && dev->agp) { 473 if ((ret = nouveau_mem_init_agp(dev, 1))) 474 DRM_ERROR("Error initialising AGP: %d\n", ret); 475 } 476#endif 477 478 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { 479 if ((ret = nouveau_sgdma_init(dev))) 480 DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret); 481 } 482 483 if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, 484 
dev_priv->gart_info.aper_size >> 485 PAGE_SHIFT, 1))) { 486 DRM_ERROR("Failed TT mm init: %d\n", ret); 487 return ret; 488 } 489 490#ifdef HACK_OLD_MM 491 vram_size <<= PAGE_SHIFT; 492 DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10); 493 if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3)) 494 return -ENOMEM; 495#endif 496 497 return 0; 498} 499 500int nouveau_mem_init(struct drm_device *dev) 501{ 502 struct drm_nouveau_private *dev_priv = dev->dev_private; 503 uint32_t fb_size; 504 int ret = 0; 505 506 dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; 507 dev_priv->fb_phys = 0; 508 dev_priv->gart_info.type = NOUVEAU_GART_NONE; 509 510 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) 511 nouveau_mem_check_nforce_dimms(dev); 512 513 /* setup a mtrr over the FB */ 514 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), 515 nouveau_mem_fb_amount(dev), 516 DRM_MTRR_WC); 517 518 /* Init FB */ 519 dev_priv->fb_phys=drm_get_resource_start(dev,1); 520 fb_size = nouveau_mem_fb_amount(dev); 521 /* On G80, limit VRAM to 512MiB temporarily due to limits in how 522 * we handle VRAM page tables. 523 */ 524 if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024)) 525 fb_size = (512 * 1024 * 1024); 526 fb_size -= dev_priv->ramin_rsvd_vram; 527 dev_priv->fb_available_size = fb_size; 528 DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); 529 530 if (fb_size>256*1024*1024) { 531 /* On cards with > 256Mb, you can't map everything. 
532 * So we create a second FB heap for that type of memory */ 533 if (nouveau_mem_init_heap(&dev_priv->fb_heap, 534 0, 256*1024*1024)) 535 return -ENOMEM; 536 if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, 537 256*1024*1024, fb_size-256*1024*1024)) 538 return -ENOMEM; 539 } else { 540 if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) 541 return -ENOMEM; 542 dev_priv->fb_nomap_heap=NULL; 543 } 544 545#if !defined(__powerpc__) && !defined(__ia64__) 546 /* Init AGP / NV50 PCIEGART */ 547 if (drm_device_is_agp(dev) && dev->agp) { 548 if ((ret = nouveau_mem_init_agp(dev, 0))) 549 DRM_ERROR("Error initialising AGP: %d\n", ret); 550 } 551#endif 552 553 /*Note: this is *not* just NV50 code, but only used on NV50 for now */ 554 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && 555 dev_priv->card_type >= NV_50) { 556 ret = nouveau_sgdma_init(dev); 557 if (!ret) { 558 ret = nouveau_sgdma_nottm_hack_init(dev); 559 if (ret) 560 nouveau_sgdma_takedown(dev); 561 } 562 563 if (ret) 564 DRM_ERROR("Error initialising SG DMA: %d\n", ret); 565 } 566 567 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { 568 if (nouveau_mem_init_heap(&dev_priv->agp_heap, 569 0, dev_priv->gart_info.aper_size)) { 570 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { 571 nouveau_sgdma_nottm_hack_takedown(dev); 572 nouveau_sgdma_takedown(dev); 573 } 574 } 575 } 576 577 /* NV04-NV40 PCIEGART */ 578 if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { 579 struct drm_scatter_gather sgreq; 580 581 DRM_DEBUG("Allocating sg memory for PCI DMA\n"); 582 sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone 583 584 if (drm_sg_alloc(dev, &sgreq)) { 585 DRM_ERROR("Unable to allocate %ldMB of scatter-gather" 586 " pages for PCI DMA!",sgreq.size>>20); 587 } else { 588 if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, 589 dev->sg->pages * PAGE_SIZE)) { 590 DRM_ERROR("Unable to initialize pci_heap!"); 591 } 592 } 593 } 594 595 /* G8x: Allocate shared page table to map real VRAM pages into 
*/ 596 if (dev_priv->card_type >= NV_50) { 597 unsigned size = ((512 * 1024 * 1024) / 65536) * 8; 598 599 ret = nouveau_gpuobj_new(dev, NULL, size, 0, 600 NVOBJ_FLAG_ZERO_ALLOC | 601 NVOBJ_FLAG_ALLOW_NO_REFS, 602 &dev_priv->vm_vram_pt); 603 if (ret) { 604 DRM_ERROR("Error creating VRAM page table: %d\n", ret); 605 return ret; 606 } 607 } 608 609 610 return 0; 611} 612 613struct mem_block * 614nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, 615 int flags, struct drm_file *file_priv) 616{ 617 struct drm_nouveau_private *dev_priv = dev->dev_private; 618 struct mem_block *block; 619 int type, tail = !(flags & NOUVEAU_MEM_USER); 620 621 /* 622 * Make things easier on ourselves: all allocations are page-aligned. 623 * We need that to map allocated regions into the user space 624 */ 625 if (alignment < PAGE_SIZE) 626 alignment = PAGE_SIZE; 627 628 /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB 629 * page size in the GPU VM. 630 */ 631 if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) { 632 size = (size + 65535) & ~65535; 633 if (alignment < 65536) 634 alignment = 65536; 635 } 636 637 /* Further down wants alignment in pages, not bytes */ 638 alignment >>= PAGE_SHIFT; 639 640 /* 641 * Warn about 0 sized allocations, but let it go through. 
It'll return 1 page 642 */ 643 if (size == 0) 644 DRM_INFO("warning : 0 byte allocation\n"); 645 646 /* 647 * Keep alloc size a multiple of the page size to keep drm_addmap() happy 648 */ 649 if (size & (~PAGE_MASK)) 650 size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; 651 652 653#define NOUVEAU_MEM_ALLOC_AGP {\ 654 type=NOUVEAU_MEM_AGP;\ 655 block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ 656 alignment, file_priv, tail); \ 657 if (block) goto alloc_ok;\ 658 } 659 660#define NOUVEAU_MEM_ALLOC_PCI {\ 661 type = NOUVEAU_MEM_PCI;\ 662 block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \ 663 alignment, file_priv, tail); \ 664 if ( block ) goto alloc_ok;\ 665 } 666 667#define NOUVEAU_MEM_ALLOC_FB {\ 668 type=NOUVEAU_MEM_FB;\ 669 if (!(flags&NOUVEAU_MEM_MAPPED)) {\ 670 block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ 671 size, alignment, \ 672 file_priv, tail); \ 673 if (block) goto alloc_ok;\ 674 }\ 675 block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ 676 alignment, file_priv, tail);\ 677 if (block) goto alloc_ok;\ 678 } 679 680 681 if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB 682 if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP 683 if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI 684 if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB 685 if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP 686 if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI 687 688 689 return NULL; 690 691alloc_ok: 692 block->flags=type; 693 694 /* On G8x, map memory into VM */ 695 if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && 696 !(flags & NOUVEAU_MEM_NOVM)) { 697 struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; 698 unsigned offset = block->start; 699 unsigned count = block->size / 65536; 700 unsigned tile = 0; 701 702 if (!pt) { 703 DRM_ERROR("vm alloc without vm pt\n"); 704 nouveau_mem_free_block(block); 705 return NULL; 706 } 707 708 /* The tiling stuff is *not* what NVIDIA does - but both the 709 * 2D and 3D engines 
seem happy with this simpler method. 710 * Should look into why NVIDIA do what they do at some point. 711 */ 712 if (flags & NOUVEAU_MEM_TILE) { 713 if (flags & NOUVEAU_MEM_TILE_ZETA) 714 tile = 0x00002800; 715 else 716 tile = 0x00007000; 717 } 718 719 while (count--) { 720 unsigned pte = offset / 65536; 721 722 INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); 723 INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); 724 offset += 65536; 725 } 726 } else { 727 block->flags |= NOUVEAU_MEM_NOVM; 728 } 729 730 if (flags&NOUVEAU_MEM_MAPPED) 731 { 732 struct drm_map_list *entry; 733 int ret = 0; 734 block->flags|=NOUVEAU_MEM_MAPPED; 735 736 if (type == NOUVEAU_MEM_AGP) { 737 if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) 738 ret = drm_addmap(dev, block->start, block->size, 739 _DRM_AGP, 0, &block->map); 740 else 741 ret = drm_addmap(dev, block->start, block->size, 742 _DRM_SCATTER_GATHER, 0, &block->map); 743 } 744 else if (type == NOUVEAU_MEM_FB) 745 ret = drm_addmap(dev, block->start + dev_priv->fb_phys, 746 block->size, _DRM_FRAME_BUFFER, 747 0, &block->map); 748 else if (type == NOUVEAU_MEM_PCI) 749 ret = drm_addmap(dev, block->start, block->size, 750 _DRM_SCATTER_GATHER, 0, &block->map); 751 752 if (ret) { 753 nouveau_mem_free_block(block); 754 return NULL; 755 } 756 757 entry = drm_find_matching_map(dev, block->map); 758 if (!entry) { 759 nouveau_mem_free_block(block); 760 return NULL; 761 } 762 block->map_handle = entry->user_token; 763 } 764 765 DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags); 766 return block; 767} 768 769void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) 770{ 771 struct drm_nouveau_private *dev_priv = dev->dev_private; 772 773 DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags); 774 775 if (block->flags&NOUVEAU_MEM_MAPPED) 776 drm_rmmap(dev, block->map); 777 778 /* G8x: Remove pages from vm */ 779 if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= 
NV_50 && 780 !(block->flags & NOUVEAU_MEM_NOVM)) { 781 struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; 782 unsigned offset = block->start; 783 unsigned count = block->size / 65536; 784 785 if (!pt) { 786 DRM_ERROR("vm free without vm pt\n"); 787 goto out_free; 788 } 789 790 while (count--) { 791 unsigned pte = offset / 65536; 792 INSTANCE_WR(pt, (pte * 2) + 0, 0); 793 INSTANCE_WR(pt, (pte * 2) + 1, 0); 794 offset += 65536; 795 } 796 } 797 798out_free: 799 nouveau_mem_free_block(block); 800} 801 802/* 803 * Ioctls 804 */ 805 806int 807nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, 808 struct drm_file *file_priv) 809{ 810 struct drm_nouveau_private *dev_priv = dev->dev_private; 811 struct drm_nouveau_mem_alloc *alloc = data; 812 struct mem_block *block; 813 814 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 815 816 if (alloc->flags & NOUVEAU_MEM_INTERNAL) 817 return -EINVAL; 818 819 block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size, 820 alloc->flags | NOUVEAU_MEM_USER, file_priv); 821 if (!block) 822 return -ENOMEM; 823 alloc->map_handle=block->map_handle; 824 alloc->offset=block->start; 825 alloc->flags=block->flags; 826 827 if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB) 828 alloc->offset += 512*1024*1024; 829 830 return 0; 831} 832 833int 834nouveau_ioctl_mem_free(struct drm_device *dev, void *data, 835 struct drm_file *file_priv) 836{ 837 struct drm_nouveau_private *dev_priv = dev->dev_private; 838 struct drm_nouveau_mem_free *memfree = data; 839 struct mem_block *block; 840 841 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 842 843 if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB) 844 memfree->offset -= 512*1024*1024; 845 846 block=NULL; 847 if (dev_priv->fb_heap && memfree->flags & NOUVEAU_MEM_FB) 848 block = find_block(dev_priv->fb_heap, memfree->offset); 849 else if (dev_priv->agp_heap && memfree->flags & NOUVEAU_MEM_AGP) 850 block = find_block(dev_priv->agp_heap, memfree->offset); 851 else if 
(dev_priv->pci_heap && memfree->flags & NOUVEAU_MEM_PCI) 852 block = find_block(dev_priv->pci_heap, memfree->offset); 853 if (!block) 854 return -EFAULT; 855 if (block->file_priv != file_priv) 856 return -EPERM; 857 858 nouveau_mem_free(dev, block); 859 return 0; 860} 861 862int 863nouveau_ioctl_mem_tile(struct drm_device *dev, void *data, 864 struct drm_file *file_priv) 865{ 866 struct drm_nouveau_private *dev_priv = dev->dev_private; 867 struct drm_nouveau_mem_tile *memtile = data; 868 struct mem_block *block = NULL; 869 870 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 871 872 if (dev_priv->card_type < NV_50) 873 return -EINVAL; 874 875 if (memtile->flags & NOUVEAU_MEM_FB) { 876 memtile->offset -= 512*1024*1024; 877 block = find_block(dev_priv->fb_heap, memtile->offset); 878 } 879 880 if (!block) 881 return -EINVAL; 882 883 if (block->file_priv != file_priv) 884 return -EPERM; 885 886 { 887 struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; 888 unsigned offset = block->start + memtile->delta; 889 unsigned count = memtile->size / 65536; 890 unsigned tile = 0; 891 892 if (memtile->flags & NOUVEAU_MEM_TILE) { 893 if (memtile->flags & NOUVEAU_MEM_TILE_ZETA) 894 tile = 0x00002800; 895 else 896 tile = 0x00007000; 897 } 898 899 while (count--) { 900 unsigned pte = offset / 65536; 901 902 INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); 903 INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); 904 offset += 65536; 905 } 906 } 907 908 return 0; 909} 910 911