i915_dma.c revision 287177
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 287177 2015-08-26 22:19:53Z bapt $");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
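 *
 * (Once GEM owns the ring, LP_RING(dev->dev_private)->obj is non-NULL and
 * the legacy hardware-lock check below is skipped.)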
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {		\
	if (LP_RING(dev->dev_private)->obj == NULL)		\
		LOCK_TEST_WITH_RETURN(dev, file);		\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg];
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX 0x21

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
	}
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * Program Hardware Status Page
	 * XXXKIB Keep 4GB limit for allocation for now.  This method
	 * of allocation is used on <= 965 hardware, that has several
	 * errata regarding the use of physical memory > 4 GB.
	 */
	dev_priv->status_page_dmah =
	    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hw_status_page =
	    dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	i915_write_hws_pga(dev);
	DRM_DEBUG("Enabled hardware status page, phys %jx\n",
	    (uintmax_t)dev_priv->dma_status_page);
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		ring->status_page.gfx_addr = 0;
		pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr,
		    PAGE_SIZE);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	DRM_LOCK(dev);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->rings[i]);
	DRM_UNLOCK(dev);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
		    ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("\n");

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
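 *
 * For example, a 2D command (type 0x2 in bits 31:29) carries its length in
 * the low byte, so validate_cmd() reports (cmd & 0xff) + 2 dwords for it and
 * the parser knows where the next command header starts.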
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
			    sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
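 *
 * (The breadcrumb is the software sequence counter written with
 * MI_STORE_DWORD_INDEX into dword I915_BREADCRUMB_INDEX of the hardware
 * status page; READ_BREADCRUMB() reads that dword back to see how far the
 * ring has actually executed.)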
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	if (++dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
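	/*
	 * With no cliprects the batch is emitted once; otherwise it is
	 * emitted once per cliprect, each pass preceded by the matching
	 * DRAWRECT from i915_emit_box().
	 */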
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) |
				    MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
	    dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return (intel_wait_ring_idle(ring));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_quiescent(dev);
	DRM_UNLOCK(dev);

	return (ret);
}

int i915_batchbuffer(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	struct drm_clip_rect *cliprects;
	size_t cliplen;
	int ret;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect);
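	/*
	 * Copy the user-supplied cliprects into a kernel buffer before
	 * taking the DRM lock; the dispatch path below only reads the
	 * kernel copy.
	 */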
	if (batch->num_cliprects < 0)
		return -EFAULT;
	if (batch->num_cliprects != 0) {
		cliprects = malloc(batch->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
		    M_WAITOK | M_ZERO);

		ret = -copyin(batch->cliprects, cliprects,
		    batch->num_cliprects * sizeof(struct drm_clip_rect));
		if (ret != 0)
			goto fail_free;
	} else
		cliprects = NULL;

	DRM_LOCK(dev);
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	DRM_UNLOCK(dev);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	free(cliprects, DRM_MEM_DMA);
	return ret;
}

int i915_cmdbuffer(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);

	ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
	if (ret != 0)
		goto fail_batch_free;

	if (cmdbuf->num_cliprects) {
		cliprects = malloc(cmdbuf->num_cliprects *
		    sizeof(struct drm_clip_rect), DRM_MEM_DMA,
		    M_WAITOK | M_ZERO);
		ret = -copyin(cmdbuf->cliprects, cliprects,
		    cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
		if (ret != 0)
			goto fail_clip_free;
	}

	DRM_LOCK(dev);
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	DRM_UNLOCK(dev);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	free(cliprects, DRM_MEM_DMA);
fail_batch_free:
	free(batch_data, DRM_MEM_DMA);
	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
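		/* The requested breadcrumb has already been reached. */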
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	ret = 0;
	mtx_lock(&dev_priv->irq_lock);
	if (ring->irq_get(ring)) {
		while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
			ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
			    "915wtq", 3 * hz);
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
		}
		ring->irq_put(ring);
		mtx_unlock(&dev_priv->irq_lock);
	} else {
		mtx_unlock(&dev_priv->irq_lock);
		if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
		    3000, 1, "915wir"))
			ret = -EBUSY;
	}

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	result = i915_emit_irq(dev);
	DRM_UNLOCK(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_LOCK(dev);
	ret = i915_dispatch_flip(dev);
	DRM_UNLOCK(dev);

	return ret;
}

int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq_enabled ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->rings[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->rings[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("tried to set status page when mode setting active\n");
		return 0;
	}

	ring->status_page.gfx_addr = dev_priv->status_gfx_addr =
	    hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr = pmap_mapdev_attr(
	    dev->agp->base + hws->addr, PAGE_SIZE,
	    VM_MEMATTR_WRITE_COMBINING);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}

	memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
	    dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}

static int
i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

#if 0
	intel_register_dsm_handler();
#endif

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
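	 *
	 * (Initialization order below: stolen memory, modeset, GEM, vblank
	 * interrupts, then fbdev; the error labels unwind that sequence.)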
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_gem:
	DRM_LOCK(dev);
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK(dev);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	return (ret);
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
	    M_NOWAIT | M_ZERO);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	free(master_priv, DRM_MEM_DMA);

	master->driver_priv = NULL;
}

static int
i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv;

	dev_priv = dev->dev_private;

	dev_priv->bridge_dev = intel_gtt_get_bridge_device();
	if (dev_priv->bridge_dev == NULL) {
		DRM_ERROR("bridge device not found\n");
		return (-1);
	}
	return (0);
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int reg;
	u32 temp_lo, temp_hi;
	u64 mchbar_addr, temp;

	dev_priv = dev->dev_private;
	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
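	/*
	 * On gen4+ the MCHBAR register is 64 bits wide, so the high dword
	 * lives at reg + 4; older chipsets only carry the low 32 bits.
	 */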

	if (INTEL_INFO(dev)->gen >= 4)
		temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4);
	else
		temp_hi = 0;
	temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef XXX_CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->dev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
	if (dev_priv->mch_res == NULL) {
		DRM_ERROR("failed mchbar resource alloc\n");
		return (-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = rman_get_start(dev_priv->mch_res);
		temp >>= 32;
		pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4);
	}
	pci_write_config(dev_priv->bridge_dev, reg,
	    rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4);
	return (0);
}

static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	int mchbar_reg;
	u32 temp;
	bool enabled;

	dev_priv = dev->dev_private;
	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4);
		enabled = (temp & DEVEN_MCHBAR_EN) != 0;
	} else {
		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled) {
		DRM_DEBUG("mchbar already enabled\n");
		return;
	}

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
		    temp | DEVEN_MCHBAR_EN, 4);
	} else {
		temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4);
		pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv;
	device_t vga;
	int mchbar_reg;
	u32 temp;

	dev_priv = dev->dev_private;
	mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			temp = pci_read_config(dev_priv->bridge_dev,
			    DEVEN_REG, 4);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config(dev_priv->bridge_dev, DEVEN_REG,
			    temp, 4);
		} else {
			temp = pci_read_config(dev_priv->bridge_dev,
			    mchbar_reg, 4);
			temp &= ~1;
			pci_write_config(dev_priv->bridge_dev, mchbar_reg,
			    temp, 4);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->dev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_device_info *info;
	unsigned long base, size;
	int mmio_bar, ret;

	info = i915_get_device_id(dev->pci_device);

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;


	ret = 0;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER,
	    M_ZERO | M_WAITOK);

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	if (i915_get_bridge_dev(dev)) {
		free(dev_priv, DRM_MEM_DRIVER);
		return (-EIO);
	}
	dev_priv->mm.gtt = intel_gtt_get();

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
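	/* Gen2 parts expose the register aperture in BAR 1, later parts in BAR 0. */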
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
	    _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret != 0) {
		DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
		free(dev_priv, DRM_MEM_DRIVER);
		return (ret);
	}

	dev_priv->tq = taskqueue_create("915", M_WAITOK,
	    taskqueue_thread_enqueue, &dev_priv->tq);
	taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq");
	mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF);
	mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF);
	mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF);
	mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF);

	intel_irq_init(dev);

	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		drm_pci_enable_msi(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0) {
			drm_rmmap(dev, dev_priv->mmio_map);
			free(dev_priv, DRM_MEM_DRIVER);
			return ret;
		}
	}

	mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		dev_priv->num_pipe = 3;
	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
	else
		dev_priv->num_pipe = 1;

	ret = drm_vblank_init(dev, dev_priv->num_pipe);
	if (ret)
		goto out_gem_unload;

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	intel_detect_pch(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	intel_opregion_init(dev);

	callout_init(&dev_priv->hangcheck_timer, 1);
	callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD,
	    i915_hangcheck_elapsed, dev);

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return (0);

out_gem_unload:
	/* XXXKIB */
	(void) i915_driver_unload(dev);
	return (ret);
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_LOCK(dev);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	DRM_UNLOCK(dev);

	i915_free_hws(dev);

	intel_teardown_mchbar(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
	}

	/* Free error state after interrupts are fully disabled. */
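	/* Stop and drain the hangcheck callout before tearing its state down. */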
	callout_stop(&dev_priv->hangcheck_timer);
	callout_drain(&dev_priv->hangcheck_timer);

	i915_destroy_error_state(dev);

	if (dev->msi_enabled)
		drm_pci_disable_msi(dev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_LOCK(dev);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		DRM_UNLOCK(dev);
		i915_gem_cleanup_aliasing_ppgtt(dev);
#if 1
		KIB_NOTYET();
#else
		if (I915_HAS_FBC(dev) && i915_powersave)
			i915_cleanup_compression(dev);
#endif
		drm_mm_takedown(&dev_priv->mm.stolen);

		intel_cleanup_overlay(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	i915_gem_unload(dev);

	mtx_destroy(&dev_priv->irq_lock);

	if (dev_priv->tq != NULL)
		taskqueue_free(dev_priv->tq);

	bus_generic_detach(dev->dev);
	drm_rmmap(dev, dev_priv->mmio_map);
	intel_teardown_gmbus(dev);

	mtx_destroy(&dev_priv->dpio_lock);
	mtx_destroy(&dev_priv->error_lock);
	mtx_destroy(&dev_priv->error_completion_lock);
	mtx_destroy(&dev_priv->rps_lock);
	free(dev->dev_private, DRM_MEM_DRIVER);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *i915_file_priv;

	i915_file_priv = malloc(sizeof(*i915_file_priv), DRM_MEM_FILES,
	    M_WAITOK | M_ZERO);

	mtx_init(&i915_file_priv->mm.lck, "915fp", NULL, MTX_DEF);
	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
	file->driver_priv = i915_file_priv;

	drm_gem_names_init(&i915_file_priv->context_idr);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
#if 1
		KIB_NOTYET();
#else
		drm_fb_helper_restore();
		vga_switcheroo_process_delayed_switch();
#endif
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	mtx_destroy(&i915_file_priv->mm.lck);
	free(i915_file_priv, DRM_MEM_FILES);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};

#ifdef COMPAT_FREEBSD32
extern struct drm_ioctl_desc i915_compat_ioctls[];
extern int i915_compat_ioctls_nr;
#endif

struct drm_driver i915_driver_info = {
	/*
	 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
	 * Linux.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,

	.buf_priv_size = sizeof(drm_i915_private_t),
	.load = i915_driver_load,
	.open = i915_driver_open,
	.unload = i915_driver_unload,
	.preclose = i915_driver_preclose,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,
	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops = &i915_gem_pager_ops,
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = i915_gem_dumb_destroy,
	.sysctl_init = i915_sysctl_init,
	.sysctl_cleanup = i915_sysctl_cleanup,

	.ioctls = i915_ioctls,
#ifdef COMPAT_FREEBSD32
	.compat_ioctls = i915_compat_ioctls,
	.num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
	.num_ioctls = ARRAY_SIZE(i915_ioctls),

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}