/* i915_dma.c revision 291430 */
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 * 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/dev/drm2/i915/i915_dma.c 291430 2015-11-28 17:37:41Z dumbbell $"); 31 32#include <dev/drm2/drmP.h> 33#include <dev/drm2/drm.h> 34#include <dev/drm2/i915/i915_drm.h> 35#include <dev/drm2/i915/i915_drv.h> 36#include <dev/drm2/i915/intel_drv.h> 37#include <dev/drm2/i915/intel_ringbuffer.h> 38 39#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS]) 40 41#define BEGIN_LP_RING(n) \ 42 intel_ring_begin(LP_RING(dev_priv), (n)) 43 44#define OUT_RING(x) \ 45 intel_ring_emit(LP_RING(dev_priv), x) 46 47#define ADVANCE_LP_RING() \ 48 intel_ring_advance(LP_RING(dev_priv)) 49 50/** 51 * Lock test for when it's just for synchronization of ring access. 52 * 53 * In that case, we don't need to do it when GEM is initialized as nobody else 54 * has access to the ring. 55 */ 56#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ 57 if (LP_RING(dev->dev_private)->obj == NULL) \ 58 LOCK_TEST_WITH_RETURN(dev, file); \ 59} while (0) 60 61static inline u32 62intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg) 63{ 64 if (I915_NEED_GFX_HWS(dev_priv->dev)) 65 return ((volatile u32*)(dev_priv->dri1.gfx_hws_cpu_addr))[reg]; 66 else 67 return intel_read_status_page(LP_RING(dev_priv), reg); 68} 69 70#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg) 71#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 72#define I915_BREADCRUMB_INDEX 0x21 73 74void i915_update_dri1_breadcrumb(struct drm_device *dev) 75{ 76 drm_i915_private_t *dev_priv = dev->dev_private; 77 struct drm_i915_master_private *master_priv; 78 79 if (dev->primary->master) { 80 master_priv = dev->primary->master->driver_priv; 81 if (master_priv->sarea_priv) 82 master_priv->sarea_priv->last_dispatch = 83 READ_BREADCRUMB(dev_priv); 84 } 85} 86 87static void i915_write_hws_pga(struct drm_device *dev) 88{ 89 drm_i915_private_t *dev_priv = dev->dev_private; 90 u32 addr; 91 92 addr = 
dev_priv->status_page_dmah->busaddr; 93 if (INTEL_INFO(dev)->gen >= 4) 94 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 95 I915_WRITE(HWS_PGA, addr); 96} 97 98/** 99 * Sets up the hardware status page for devices that need a physical address 100 * in the register. 101 */ 102static int i915_init_phys_hws(struct drm_device *dev) 103{ 104 drm_i915_private_t *dev_priv = dev->dev_private; 105 struct intel_ring_buffer *ring = LP_RING(dev_priv); 106 107 /* 108 * Program Hardware Status Page 109 * XXXKIB Keep 4GB limit for allocation for now. This method 110 * of allocation is used on <= 965 hardware, that has several 111 * erratas regarding the use of physical memory > 4 GB. 112 */ 113 dev_priv->status_page_dmah = 114 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR); 115 if (!dev_priv->status_page_dmah) { 116 DRM_ERROR("Can not allocate hardware status page\n"); 117 return -ENOMEM; 118 } 119 ring->status_page.page_addr = dev_priv->hw_status_page = 120 dev_priv->status_page_dmah->vaddr; 121 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 122 123 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 124 125 i915_write_hws_pga(dev); 126 DRM_DEBUG("Enabled hardware status page, phys %jx\n", 127 (uintmax_t)dev_priv->dma_status_page); 128 return 0; 129} 130 131/** 132 * Frees the hardware status page, whether it's a physical address or a virtual 133 * address set up by the X Server. 
134 */ 135static void i915_free_hws(struct drm_device *dev) 136{ 137 drm_i915_private_t *dev_priv = dev->dev_private; 138 struct intel_ring_buffer *ring = LP_RING(dev_priv); 139 140 if (dev_priv->status_page_dmah) { 141 drm_pci_free(dev, dev_priv->status_page_dmah); 142 dev_priv->status_page_dmah = NULL; 143 } 144 145 if (dev_priv->status_gfx_addr) { 146 dev_priv->status_gfx_addr = 0; 147 ring->status_page.gfx_addr = 0; 148 pmap_unmapdev((vm_offset_t)dev_priv->dri1.gfx_hws_cpu_addr, 149 PAGE_SIZE); 150 } 151 152 /* Need to rewrite hardware status page */ 153 I915_WRITE(HWS_PGA, 0x1ffff000); 154} 155 156void i915_kernel_lost_context(struct drm_device * dev) 157{ 158 drm_i915_private_t *dev_priv = dev->dev_private; 159 struct drm_i915_master_private *master_priv; 160 struct intel_ring_buffer *ring = LP_RING(dev_priv); 161 162 /* 163 * We should never lose context on the ring with modesetting 164 * as we don't expose it to userspace 165 */ 166 if (drm_core_check_feature(dev, DRIVER_MODESET)) 167 return; 168 169 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 170 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 171 ring->space = ring->head - (ring->tail + 8); 172 if (ring->space < 0) 173 ring->space += ring->size; 174 175 if (!dev->primary->master) 176 return; 177 178 master_priv = dev->primary->master->driver_priv; 179 if (ring->head == ring->tail && master_priv->sarea_priv) 180 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 181} 182 183static int i915_dma_cleanup(struct drm_device * dev) 184{ 185 drm_i915_private_t *dev_priv = dev->dev_private; 186 int i; 187 188 /* Make sure interrupts are disabled here because the uninstall ioctl 189 * may not have been called from userspace and after dev_private 190 * is freed, it's too late. 
191 */ 192 if (dev->irq_enabled) 193 drm_irq_uninstall(dev); 194 195 DRM_LOCK(dev); 196 for (i = 0; i < I915_NUM_RINGS; i++) 197 intel_cleanup_ring_buffer(&dev_priv->rings[i]); 198 DRM_UNLOCK(dev); 199 200 /* Clear the HWS virtual address at teardown */ 201 if (I915_NEED_GFX_HWS(dev)) 202 i915_free_hws(dev); 203 204 return 0; 205} 206 207static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) 208{ 209 drm_i915_private_t *dev_priv = dev->dev_private; 210 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 211 int ret; 212 213 master_priv->sarea = drm_getsarea(dev); 214 if (master_priv->sarea) { 215 master_priv->sarea_priv = (drm_i915_sarea_t *) 216 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 217 } else { 218 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); 219 } 220 221 if (init->ring_size != 0) { 222 if (LP_RING(dev_priv)->obj != NULL) { 223 i915_dma_cleanup(dev); 224 DRM_ERROR("Client tried to initialize ringbuffer in " 225 "GEM mode\n"); 226 return -EINVAL; 227 } 228 229 ret = intel_render_ring_init_dri(dev, 230 init->ring_start, 231 init->ring_size); 232 if (ret) { 233 i915_dma_cleanup(dev); 234 return ret; 235 } 236 } 237 238 dev_priv->cpp = init->cpp; 239 dev_priv->back_offset = init->back_offset; 240 dev_priv->front_offset = init->front_offset; 241 dev_priv->current_page = 0; 242 if (master_priv->sarea_priv) 243 master_priv->sarea_priv->pf_current_page = 0; 244 245 /* Allow hardware batchbuffers unless told otherwise. 
246 */ 247 dev_priv->dri1.allow_batchbuffer = 1; 248 249 return 0; 250} 251 252static int i915_dma_resume(struct drm_device * dev) 253{ 254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 255 struct intel_ring_buffer *ring = LP_RING(dev_priv); 256 257 DRM_DEBUG_DRIVER("%s\n", __func__); 258 259 if (ring->virtual_start == NULL) { 260 DRM_ERROR("can not ioremap virtual address for" 261 " ring buffer\n"); 262 return -ENOMEM; 263 } 264 265 /* Program Hardware Status Page */ 266 if (!ring->status_page.page_addr) { 267 DRM_ERROR("Can not find hardware status page\n"); 268 return -EINVAL; 269 } 270 DRM_DEBUG_DRIVER("hw status page @ %p\n", 271 ring->status_page.page_addr); 272 if (ring->status_page.gfx_addr != 0) 273 intel_ring_setup_status_page(ring); 274 else 275 i915_write_hws_pga(dev); 276 277 DRM_DEBUG_DRIVER("Enabled hardware status page\n"); 278 279 return 0; 280} 281 282static int i915_dma_init(struct drm_device *dev, void *data, 283 struct drm_file *file_priv) 284{ 285 drm_i915_init_t *init = data; 286 int retcode = 0; 287 288 if (drm_core_check_feature(dev, DRIVER_MODESET)) 289 return -ENODEV; 290 291 switch (init->func) { 292 case I915_INIT_DMA: 293 retcode = i915_initialize(dev, init); 294 break; 295 case I915_CLEANUP_DMA: 296 retcode = i915_dma_cleanup(dev); 297 break; 298 case I915_RESUME_DMA: 299 retcode = i915_dma_resume(dev); 300 break; 301 default: 302 retcode = -EINVAL; 303 break; 304 } 305 306 return retcode; 307} 308 309/* Implement basically the same security restrictions as hardware does 310 * for MI_BATCH_NON_SECURE. These can be made stricter at any time. 311 * 312 * Most of the calculations below involve calculating the size of a 313 * particular instruction. It's important to get the size right as 314 * that tells us where the next instruction to check is. Any illegal 315 * instruction detected will be given a size of zero, which is a 316 * signal to abort the rest of the buffer. 
317 */ 318static int validate_cmd(int cmd) 319{ 320 switch (((cmd >> 29) & 0x7)) { 321 case 0x0: 322 switch ((cmd >> 23) & 0x3f) { 323 case 0x0: 324 return 1; /* MI_NOOP */ 325 case 0x4: 326 return 1; /* MI_FLUSH */ 327 default: 328 return 0; /* disallow everything else */ 329 } 330 break; 331 case 0x1: 332 return 0; /* reserved */ 333 case 0x2: 334 return (cmd & 0xff) + 2; /* 2d commands */ 335 case 0x3: 336 if (((cmd >> 24) & 0x1f) <= 0x18) 337 return 1; 338 339 switch ((cmd >> 24) & 0x1f) { 340 case 0x1c: 341 return 1; 342 case 0x1d: 343 switch ((cmd >> 16) & 0xff) { 344 case 0x3: 345 return (cmd & 0x1f) + 2; 346 case 0x4: 347 return (cmd & 0xf) + 2; 348 default: 349 return (cmd & 0xffff) + 2; 350 } 351 case 0x1e: 352 if (cmd & (1 << 23)) 353 return (cmd & 0xffff) + 1; 354 else 355 return 1; 356 case 0x1f: 357 if ((cmd & (1 << 23)) == 0) /* inline vertices */ 358 return (cmd & 0x1ffff) + 2; 359 else if (cmd & (1 << 17)) /* indirect random */ 360 if ((cmd & 0xffff) == 0) 361 return 0; /* unknown length, too hard */ 362 else 363 return (((cmd & 0xffff) + 1) / 2) + 1; 364 else 365 return 2; /* indirect sequential */ 366 default: 367 return 0; 368 } 369 default: 370 return 0; 371 } 372 373 return 0; 374} 375 376static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) 377{ 378 drm_i915_private_t *dev_priv = dev->dev_private; 379 int i; 380 381 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) 382 return -EINVAL; 383 384 BEGIN_LP_RING((dwords+1)&~1); 385 386 for (i = 0; i < dwords;) { 387 int cmd, sz; 388 389 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 390 return -EINVAL; 391 392 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 393 return -EINVAL; 394 395 OUT_RING(cmd); 396 397 while (++i, --sz) { 398 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 399 sizeof(cmd))) { 400 return -EINVAL; 401 } 402 OUT_RING(cmd); 403 } 404 } 405 406 if (dwords & 1) 407 OUT_RING(0); 408 409 ADVANCE_LP_RING(); 410 411 return 0; 
412} 413 414int 415i915_emit_box(struct drm_device *dev, 416 struct drm_clip_rect *box, 417 int DR1, int DR4) 418{ 419 struct drm_i915_private *dev_priv = dev->dev_private; 420 int ret; 421 422 if (box->y2 <= box->y1 || box->x2 <= box->x1 || 423 box->y2 <= 0 || box->x2 <= 0) { 424 DRM_ERROR("Bad box %d,%d..%d,%d\n", 425 box->x1, box->y1, box->x2, box->y2); 426 return -EINVAL; 427 } 428 429 if (INTEL_INFO(dev)->gen >= 4) { 430 ret = BEGIN_LP_RING(4); 431 if (ret) 432 return ret; 433 434 OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 435 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); 436 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); 437 OUT_RING(DR4); 438 } else { 439 ret = BEGIN_LP_RING(6); 440 if (ret) 441 return ret; 442 443 OUT_RING(GFX_OP_DRAWRECT_INFO); 444 OUT_RING(DR1); 445 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); 446 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); 447 OUT_RING(DR4); 448 OUT_RING(0); 449 } 450 ADVANCE_LP_RING(); 451 452 return 0; 453} 454 455/* XXX: Emitting the counter should really be moved to part of the IRQ 456 * emit. 
For now, do it in both places: 457 */ 458 459static void i915_emit_breadcrumb(struct drm_device *dev) 460{ 461 drm_i915_private_t *dev_priv = dev->dev_private; 462 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 463 464 if (++dev_priv->counter > 0x7FFFFFFFUL) 465 dev_priv->counter = 0; 466 if (master_priv->sarea_priv) 467 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 468 469 if (BEGIN_LP_RING(4) == 0) { 470 OUT_RING(MI_STORE_DWORD_INDEX); 471 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 472 OUT_RING(dev_priv->counter); 473 OUT_RING(0); 474 ADVANCE_LP_RING(); 475 } 476} 477 478static int i915_dispatch_cmdbuffer(struct drm_device * dev, 479 drm_i915_cmdbuffer_t *cmd, 480 struct drm_clip_rect *cliprects, 481 void *cmdbuf) 482{ 483 int nbox = cmd->num_cliprects; 484 int i = 0, count, ret; 485 486 if (cmd->sz & 0x3) { 487 DRM_ERROR("alignment\n"); 488 return -EINVAL; 489 } 490 491 i915_kernel_lost_context(dev); 492 493 count = nbox ? nbox : 1; 494 495 for (i = 0; i < count; i++) { 496 if (i < nbox) { 497 ret = i915_emit_box(dev, &cliprects[i], 498 cmd->DR1, cmd->DR4); 499 if (ret) 500 return ret; 501 } 502 503 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); 504 if (ret) 505 return ret; 506 } 507 508 i915_emit_breadcrumb(dev); 509 return 0; 510} 511 512static int i915_dispatch_batchbuffer(struct drm_device * dev, 513 drm_i915_batchbuffer_t * batch, 514 struct drm_clip_rect *cliprects) 515{ 516 struct drm_i915_private *dev_priv = dev->dev_private; 517 int nbox = batch->num_cliprects; 518 int i, count, ret; 519 520 if (drm_core_check_feature(dev, DRIVER_MODESET)) 521 return -ENODEV; 522 523 if ((batch->start | batch->used) & 0x7) { 524 DRM_ERROR("alignment\n"); 525 return -EINVAL; 526 } 527 528 i915_kernel_lost_context(dev); 529 530 count = nbox ? 
nbox : 1; 531 for (i = 0; i < count; i++) { 532 if (i < nbox) { 533 ret = i915_emit_box(dev, &cliprects[i], 534 batch->DR1, batch->DR4); 535 if (ret) 536 return ret; 537 } 538 539 if (!IS_I830(dev) && !IS_845G(dev)) { 540 ret = BEGIN_LP_RING(2); 541 if (ret) 542 return ret; 543 544 if (INTEL_INFO(dev)->gen >= 4) { 545 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 546 OUT_RING(batch->start); 547 } else { 548 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 549 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 550 } 551 } else { 552 ret = BEGIN_LP_RING(4); 553 if (ret) 554 return ret; 555 556 OUT_RING(MI_BATCH_BUFFER); 557 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 558 OUT_RING(batch->start + batch->used - 4); 559 OUT_RING(0); 560 } 561 ADVANCE_LP_RING(); 562 } 563 564 i915_emit_breadcrumb(dev); 565 return 0; 566} 567 568static int i915_dispatch_flip(struct drm_device * dev) 569{ 570 drm_i915_private_t *dev_priv = dev->dev_private; 571 struct drm_i915_master_private *master_priv = 572 dev->primary->master->driver_priv; 573 int ret; 574 575 if (!master_priv->sarea_priv) 576 return -EINVAL; 577 578 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", 579 __func__, 580 dev_priv->current_page, 581 master_priv->sarea_priv->pf_current_page); 582 583 i915_kernel_lost_context(dev); 584 585 ret = BEGIN_LP_RING(10); 586 if (ret) 587 return ret; 588 589 OUT_RING(MI_FLUSH | MI_READ_FLUSH); 590 OUT_RING(0); 591 592 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 593 OUT_RING(0); 594 if (dev_priv->current_page == 0) { 595 OUT_RING(dev_priv->back_offset); 596 dev_priv->current_page = 1; 597 } else { 598 OUT_RING(dev_priv->front_offset); 599 dev_priv->current_page = 0; 600 } 601 OUT_RING(0); 602 603 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 604 OUT_RING(0); 605 606 ADVANCE_LP_RING(); 607 608 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 609 610 if (BEGIN_LP_RING(4) == 0) { 611 OUT_RING(MI_STORE_DWORD_INDEX); 612 
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 613 OUT_RING(dev_priv->counter); 614 OUT_RING(0); 615 ADVANCE_LP_RING(); 616 } 617 618 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 619 return 0; 620} 621 622static int i915_quiescent(struct drm_device *dev) 623{ 624 i915_kernel_lost_context(dev); 625 return intel_wait_ring_idle(LP_RING(dev->dev_private)); 626} 627 628static int i915_flush_ioctl(struct drm_device *dev, void *data, 629 struct drm_file *file_priv) 630{ 631 int ret; 632 633 if (drm_core_check_feature(dev, DRIVER_MODESET)) 634 return -ENODEV; 635 636 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 637 638 DRM_LOCK(dev); 639 ret = i915_quiescent(dev); 640 DRM_UNLOCK(dev); 641 642 return ret; 643} 644 645int i915_batchbuffer(struct drm_device *dev, void *data, 646 struct drm_file *file_priv) 647{ 648 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 649 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 650 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 651 master_priv->sarea_priv; 652 drm_i915_batchbuffer_t *batch = data; 653 size_t cliplen; 654 int ret; 655 struct drm_clip_rect *cliprects = NULL; 656 657 if (!dev_priv->dri1.allow_batchbuffer) { 658 DRM_ERROR("Batchbuffer ioctl disabled\n"); 659 return -EINVAL; 660 } 661 662 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", 663 batch->start, batch->used, batch->num_cliprects); 664 665 cliplen = batch->num_cliprects * sizeof(struct drm_clip_rect); 666 if (batch->num_cliprects < 0) 667 return -EFAULT; 668 if (batch->num_cliprects != 0) { 669 cliprects = malloc(batch->num_cliprects * 670 sizeof(struct drm_clip_rect), 671 DRM_MEM_DMA, M_WAITOK | M_ZERO); 672 673 ret = -copyin(batch->cliprects, cliprects, 674 batch->num_cliprects * 675 sizeof(struct drm_clip_rect)); 676 if (ret != 0) 677 goto fail_free; 678 } else 679 cliprects = NULL; 680 681 DRM_LOCK(dev); 682 RING_LOCK_TEST_WITH_RETURN(dev, 
file_priv); 683 ret = i915_dispatch_batchbuffer(dev, batch, cliprects); 684 DRM_UNLOCK(dev); 685 686 if (sarea_priv) 687 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 688 689fail_free: 690 free(cliprects, DRM_MEM_DMA); 691 692 return ret; 693} 694 695int i915_cmdbuffer(struct drm_device *dev, void *data, 696 struct drm_file *file_priv) 697{ 698 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 699 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 700 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 701 master_priv->sarea_priv; 702 drm_i915_cmdbuffer_t *cmdbuf = data; 703 struct drm_clip_rect *cliprects = NULL; 704 void *batch_data; 705 int ret; 706 707 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 708 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 709 710 if (drm_core_check_feature(dev, DRIVER_MODESET)) 711 return -ENODEV; 712 713 if (cmdbuf->num_cliprects < 0) 714 return -EINVAL; 715 716 batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK); 717 718 ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz); 719 if (ret != 0) 720 goto fail_batch_free; 721 722 if (cmdbuf->num_cliprects) { 723 cliprects = malloc(cmdbuf->num_cliprects * 724 sizeof(struct drm_clip_rect), DRM_MEM_DMA, M_WAITOK | M_ZERO); 725 ret = -copyin(cmdbuf->cliprects, cliprects, 726 cmdbuf->num_cliprects * 727 sizeof(struct drm_clip_rect)); 728 if (ret != 0) 729 goto fail_clip_free; 730 } 731 732 DRM_LOCK(dev); 733 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 734 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); 735 DRM_UNLOCK(dev); 736 if (ret) { 737 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 738 goto fail_clip_free; 739 } 740 741 if (sarea_priv) 742 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 743 744fail_clip_free: 745 free(cliprects, DRM_MEM_DMA); 746fail_batch_free: 747 free(batch_data, DRM_MEM_DMA); 748 749 return ret; 750} 751 752static int i915_emit_irq(struct drm_device * dev) 753{ 
754 drm_i915_private_t *dev_priv = dev->dev_private; 755 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 756 757 i915_kernel_lost_context(dev); 758 759 DRM_DEBUG_DRIVER("\n"); 760 761 dev_priv->counter++; 762 if (dev_priv->counter > 0x7FFFFFFFUL) 763 dev_priv->counter = 1; 764 if (master_priv->sarea_priv) 765 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 766 767 if (BEGIN_LP_RING(4) == 0) { 768 OUT_RING(MI_STORE_DWORD_INDEX); 769 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 770 OUT_RING(dev_priv->counter); 771 OUT_RING(MI_USER_INTERRUPT); 772 ADVANCE_LP_RING(); 773 } 774 775 return dev_priv->counter; 776} 777 778static int i915_wait_irq(struct drm_device * dev, int irq_nr) 779{ 780 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 781 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 782 int ret = 0; 783 struct intel_ring_buffer *ring = LP_RING(dev_priv); 784 785 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 786 READ_BREADCRUMB(dev_priv)); 787 788 if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 789 if (master_priv->sarea_priv) 790 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 791 return 0; 792 } 793 794 if (master_priv->sarea_priv) 795 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 796 797 ret = 0; 798 mtx_lock(&dev_priv->irq_lock); 799 if (ring->irq_get(ring)) { 800 while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) { 801 ret = -msleep(ring, &dev_priv->irq_lock, PCATCH, 802 "915wtq", 3 * hz); 803 if (ret == -ERESTART) 804 ret = -ERESTARTSYS; 805 } 806 ring->irq_put(ring); 807 mtx_unlock(&dev_priv->irq_lock); 808 } else { 809 mtx_unlock(&dev_priv->irq_lock); 810 if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr, 811 3000, 1, "915wir")) 812 ret = -EBUSY; 813 } 814 815 if (ret == -EBUSY) { 816 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 817 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 818 } 819 820 
return ret; 821} 822 823/* Needs the lock as it touches the ring. 824 */ 825int i915_irq_emit(struct drm_device *dev, void *data, 826 struct drm_file *file_priv) 827{ 828 drm_i915_private_t *dev_priv = dev->dev_private; 829 drm_i915_irq_emit_t *emit = data; 830 int result; 831 832 if (drm_core_check_feature(dev, DRIVER_MODESET)) 833 return -ENODEV; 834 835 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { 836 DRM_ERROR("called with no initialization\n"); 837 return -EINVAL; 838 } 839 840 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 841 842 DRM_LOCK(dev); 843 result = i915_emit_irq(dev); 844 DRM_UNLOCK(dev); 845 846 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 847 DRM_ERROR("copy_to_user\n"); 848 return -EFAULT; 849 } 850 851 return 0; 852} 853 854/* Doesn't need the hardware lock. 855 */ 856static int i915_irq_wait(struct drm_device *dev, void *data, 857 struct drm_file *file_priv) 858{ 859 drm_i915_private_t *dev_priv = dev->dev_private; 860 drm_i915_irq_wait_t *irqwait = data; 861 862 if (drm_core_check_feature(dev, DRIVER_MODESET)) 863 return -ENODEV; 864 865 if (!dev_priv) { 866 DRM_ERROR("called with no initialization\n"); 867 return -EINVAL; 868 } 869 870 return i915_wait_irq(dev, irqwait->irq_seq); 871} 872 873static int i915_vblank_pipe_get(struct drm_device *dev, void *data, 874 struct drm_file *file_priv) 875{ 876 drm_i915_private_t *dev_priv = dev->dev_private; 877 drm_i915_vblank_pipe_t *pipe = data; 878 879 if (drm_core_check_feature(dev, DRIVER_MODESET)) 880 return -ENODEV; 881 882 if (!dev_priv) { 883 DRM_ERROR("called with no initialization\n"); 884 return -EINVAL; 885 } 886 887 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 888 889 return 0; 890} 891 892/** 893 * Schedule buffer swap at given vertical blank. 894 */ 895static int i915_vblank_swap(struct drm_device *dev, void *data, 896 struct drm_file *file_priv) 897{ 898 /* The delayed swap mechanism was fundamentally racy, and has been 899 * removed. 
The model was that the client requested a delayed flip/swap 900 * from the kernel, then waited for vblank before continuing to perform 901 * rendering. The problem was that the kernel might wake the client 902 * up before it dispatched the vblank swap (since the lock has to be 903 * held while touching the ringbuffer), in which case the client would 904 * clear and start the next frame before the swap occurred, and 905 * flicker would occur in addition to likely missing the vblank. 906 * 907 * In the absence of this ioctl, userland falls back to a correct path 908 * of waiting for a vblank, then dispatching the swap on its own. 909 * Context switching to userland and back is plenty fast enough for 910 * meeting the requirements of vblank swapping. 911 */ 912 return -EINVAL; 913} 914 915static int i915_flip_bufs(struct drm_device *dev, void *data, 916 struct drm_file *file_priv) 917{ 918 int ret; 919 920 if (drm_core_check_feature(dev, DRIVER_MODESET)) 921 return -ENODEV; 922 923 DRM_DEBUG_DRIVER("%s\n", __func__); 924 925 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 926 927 DRM_LOCK(dev); 928 ret = i915_dispatch_flip(dev); 929 DRM_UNLOCK(dev); 930 931 return ret; 932} 933 934int i915_getparam(struct drm_device *dev, void *data, 935 struct drm_file *file_priv) 936{ 937 drm_i915_private_t *dev_priv = dev->dev_private; 938 drm_i915_getparam_t *param = data; 939 int value; 940 941 if (!dev_priv) { 942 DRM_ERROR("called with no initialization\n"); 943 return -EINVAL; 944 } 945 946 switch (param->param) { 947 case I915_PARAM_IRQ_ACTIVE: 948 value = dev->irq_enabled ? 1 : 0; 949 break; 950 case I915_PARAM_ALLOW_BATCHBUFFER: 951 value = dev_priv->dri1.allow_batchbuffer ? 
1 : 0; 952 break; 953 case I915_PARAM_LAST_DISPATCH: 954 value = READ_BREADCRUMB(dev_priv); 955 break; 956 case I915_PARAM_CHIPSET_ID: 957 value = dev->pci_device; 958 break; 959 case I915_PARAM_HAS_GEM: 960 value = 1; 961 break; 962 case I915_PARAM_NUM_FENCES_AVAIL: 963 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 964 break; 965 case I915_PARAM_HAS_OVERLAY: 966 value = dev_priv->overlay ? 1 : 0; 967 break; 968 case I915_PARAM_HAS_PAGEFLIPPING: 969 value = 1; 970 break; 971 case I915_PARAM_HAS_EXECBUF2: 972 value = 1; 973 break; 974 case I915_PARAM_HAS_BSD: 975 value = intel_ring_initialized(&dev_priv->rings[VCS]); 976 break; 977 case I915_PARAM_HAS_BLT: 978 value = intel_ring_initialized(&dev_priv->rings[BCS]); 979 break; 980 case I915_PARAM_HAS_RELAXED_FENCING: 981 value = 1; 982 break; 983 case I915_PARAM_HAS_COHERENT_RINGS: 984 value = 1; 985 break; 986 case I915_PARAM_HAS_EXEC_CONSTANTS: 987 value = INTEL_INFO(dev)->gen >= 4; 988 break; 989 case I915_PARAM_HAS_RELAXED_DELTA: 990 value = 1; 991 break; 992 case I915_PARAM_HAS_GEN7_SOL_RESET: 993 value = 1; 994 break; 995 case I915_PARAM_HAS_LLC: 996 value = HAS_LLC(dev); 997 break; 998 case I915_PARAM_HAS_ALIASING_PPGTT: 999 value = dev_priv->mm.aliasing_ppgtt ? 
1 : 0; 1000 break; 1001 default: 1002 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1003 param->param); 1004 return -EINVAL; 1005 } 1006 1007 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 1008 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 1009 return -EFAULT; 1010 } 1011 1012 return 0; 1013} 1014 1015static int i915_setparam(struct drm_device *dev, void *data, 1016 struct drm_file *file_priv) 1017{ 1018 drm_i915_private_t *dev_priv = dev->dev_private; 1019 drm_i915_setparam_t *param = data; 1020 1021 if (!dev_priv) { 1022 DRM_ERROR("called with no initialization\n"); 1023 return -EINVAL; 1024 } 1025 1026 switch (param->param) { 1027 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 1028 break; 1029 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 1030 break; 1031 case I915_SETPARAM_ALLOW_BATCHBUFFER: 1032 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; 1033 break; 1034 case I915_SETPARAM_NUM_USED_FENCES: 1035 if (param->value > dev_priv->num_fence_regs || 1036 param->value < 0) 1037 return -EINVAL; 1038 /* Userspace can use first N regs */ 1039 dev_priv->fence_reg_start = param->value; 1040 break; 1041 default: 1042 DRM_DEBUG_DRIVER("unknown parameter %d\n", 1043 param->param); 1044 return -EINVAL; 1045 } 1046 1047 return 0; 1048} 1049 1050static int i915_set_status_page(struct drm_device *dev, void *data, 1051 struct drm_file *file_priv) 1052{ 1053 drm_i915_private_t *dev_priv = dev->dev_private; 1054 drm_i915_hws_addr_t *hws = data; 1055 struct intel_ring_buffer *ring; 1056 1057 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1058 return -ENODEV; 1059 1060 if (!I915_NEED_GFX_HWS(dev)) 1061 return -EINVAL; 1062 1063 if (!dev_priv) { 1064 DRM_ERROR("called with no initialization\n"); 1065 return -EINVAL; 1066 } 1067 1068 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1069 DRM_ERROR("tried to set status page when mode setting active\n"); 1070 return 0; 1071 } 1072 1073 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); 1074 1075 ring = 
LP_RING(dev_priv); 1076 ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 1077 hws->addr & (0x1ffff<<12); 1078 1079 dev_priv->dri1.gfx_hws_cpu_addr = 1080 pmap_mapdev_attr(dev->agp->base + hws->addr, PAGE_SIZE, 1081 VM_MEMATTR_WRITE_COMBINING); 1082 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { 1083 i915_dma_cleanup(dev); 1084 ring->status_page.gfx_addr = dev_priv->status_gfx_addr = 0; 1085 DRM_ERROR("can not ioremap virtual address for" 1086 " G33 hw status page\n"); 1087 return -ENOMEM; 1088 } 1089 1090 memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); 1091 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 1092 1093 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", 1094 dev_priv->status_gfx_addr); 1095 DRM_DEBUG_DRIVER("load hws at %p\n", 1096 dev_priv->hw_status_page); 1097 return 0; 1098} 1099 1100static int i915_get_bridge_dev(struct drm_device *dev) 1101{ 1102 struct drm_i915_private *dev_priv = dev->dev_private; 1103 1104 dev_priv->bridge_dev = intel_gtt_get_bridge_device(); 1105 if (!dev_priv->bridge_dev) { 1106 DRM_ERROR("bridge device not found\n"); 1107 return -1; 1108 } 1109 return 0; 1110} 1111 1112#define MCHBAR_I915 0x44 1113#define MCHBAR_I965 0x48 1114#define MCHBAR_SIZE (4*4096) 1115 1116#define DEVEN_REG 0x54 1117#define DEVEN_MCHBAR_EN (1 << 28) 1118 1119/* Allocate space for the MCH regs if needed, return nonzero on error */ 1120static int 1121intel_alloc_mchbar_resource(struct drm_device *dev) 1122{ 1123 drm_i915_private_t *dev_priv = dev->dev_private; 1124 int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 1125 u32 temp_lo, temp_hi = 0; 1126 u64 mchbar_addr, temp; 1127 1128 if (INTEL_INFO(dev)->gen >= 4) 1129 temp_hi = pci_read_config(dev_priv->bridge_dev, reg + 4, 4); 1130 temp_lo = pci_read_config(dev_priv->bridge_dev, reg, 4); 1131 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 1132 1133 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 1134#ifdef XXX_CONFIG_PNP 1135 if (mchbar_addr && 1136 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) 1137 return 0; 1138#endif 1139 1140 /* Get some space for it */ 1141 device_t vga; 1142 vga = device_get_parent(dev->dev); 1143 dev_priv->mch_res_rid = 0x100; 1144 dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), 1145 dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL, 1146 MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE); 1147 if (dev_priv->mch_res == NULL) { 1148 DRM_DEBUG_DRIVER("failed bus alloc\n"); 1149 return -ENOMEM; 1150 } 1151 1152 if (INTEL_INFO(dev)->gen >= 4) { 1153 temp = rman_get_start(dev_priv->mch_res); 1154 temp >>= 32; 1155 pci_write_config(dev_priv->bridge_dev, reg + 4, temp, 4); 1156 } 1157 pci_write_config(dev_priv->bridge_dev, reg, 1158 rman_get_start(dev_priv->mch_res) & UINT32_MAX, 4); 1159 return 0; 1160} 1161 1162static void 1163intel_setup_mchbar(struct drm_device *dev) 1164{ 1165 drm_i915_private_t *dev_priv = dev->dev_private; 1166 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
/*
 * Completion of intel_setup_mchbar() (opened on the previous span): on
 * i915G/GM the enable bit is DEVEN_MCHBAR_EN in DEVEN_REG; on other chips
 * it is bit 0 of the MCHBAR register itself.  If MCHBAR is already enabled
 * nothing is done; otherwise the resource is allocated via
 * intel_alloc_mchbar_resource() and the enable bit is set, with
 * mchbar_need_disable recording that teardown must undo this.  Also begins
 * intel_teardown_mchbar(), which continues on the next span.
 */
MCHBAR_I965 : MCHBAR_I915; 1167 u32 temp; 1168 bool enabled; 1169 1170 dev_priv->mchbar_need_disable = false; 1171 1172 if (IS_I915G(dev) || IS_I915GM(dev)) { 1173 temp = pci_read_config(dev_priv->bridge_dev, DEVEN_REG, 4); 1174 enabled = (temp & DEVEN_MCHBAR_EN) != 0; 1175 } else { 1176 temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4); 1177 enabled = temp & 1; 1178 } 1179 1180 /* If it's already enabled, don't have to do anything */ 1181 if (enabled) { 1182 DRM_DEBUG("mchbar already enabled\n"); 1183 return; 1184 } 1185 1186 if (intel_alloc_mchbar_resource(dev)) 1187 return; 1188 1189 dev_priv->mchbar_need_disable = true; 1190 1191 /* Space is allocated or reserved, so enable it. */ 1192 if (IS_I915G(dev) || IS_I915GM(dev)) { 1193 pci_write_config(dev_priv->bridge_dev, DEVEN_REG, 1194 temp | DEVEN_MCHBAR_EN, 4); 1195 } else { 1196 temp = pci_read_config(dev_priv->bridge_dev, mchbar_reg, 4); 1197 pci_write_config(dev_priv->bridge_dev, mchbar_reg, temp | 1, 4); 1198 } 1199} 1200 1201static void 1202intel_teardown_mchbar(struct drm_device *dev) 1203{ 1204 drm_i915_private_t *dev_priv = dev->dev_private; 1205 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ?
/*
 * Completion of intel_teardown_mchbar() (opened on the previous span):
 * clears the MCHBAR enable bit only when this driver set it
 * (mchbar_need_disable), then deactivates and releases the bus resource
 * allocated by intel_alloc_mchbar_resource(), nulling mch_res so teardown
 * is idempotent.  Also begins i915_load_modeset_init(), the KMS bring-up
 * path, where a VBIOS parse failure is merely logged; continues on the
 * next span.
 */
MCHBAR_I965 : MCHBAR_I915; 1206 u32 temp; 1207 1208 if (dev_priv->mchbar_need_disable) { 1209 if (IS_I915G(dev) || IS_I915GM(dev)) { 1210 temp = pci_read_config(dev_priv->bridge_dev, 1211 DEVEN_REG, 4); 1212 temp &= ~DEVEN_MCHBAR_EN; 1213 pci_write_config(dev_priv->bridge_dev, DEVEN_REG, 1214 temp, 4); 1215 } else { 1216 temp = pci_read_config(dev_priv->bridge_dev, 1217 mchbar_reg, 4); 1218 temp &= ~1; 1219 pci_write_config(dev_priv->bridge_dev, mchbar_reg, 1220 temp, 4); 1221 } 1222 } 1223 1224 if (dev_priv->mch_res != NULL) { 1225 device_t vga; 1226 vga = device_get_parent(dev->dev); 1227 BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev, 1228 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); 1229 BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev, 1230 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); 1231 dev_priv->mch_res = NULL; 1232 } 1233} 1234 1235static int i915_load_modeset_init(struct drm_device *dev) 1236{ 1237 struct drm_i915_private *dev_priv = dev->dev_private; 1238 int ret; 1239 1240 ret = intel_parse_bios(dev); 1241 if (ret) 1242 DRM_INFO("failed to find VBIOS tables\n"); 1243 1244#if 0 1245 intel_register_dsm_handler(); 1246#endif 1247 1248 /* Initialise stolen first so that we may reserve preallocated 1249 * objects for the BIOS to KMS transition.
/*
 * This span completes i915_load_modeset_init(): stolen memory, modeset and
 * GEM init, IRQ install, fbdev and KMS poll helper, then clears
 * mm.suspended; failures unwind through the cleanup_gem /
 * cleanup_gem_stolen / cleanup_vga_switcheroo labels in reverse order.  It
 * also contains i915_master_create() and i915_master_destroy() -- the
 * per-DRM-master private data uses an M_NOWAIT allocation, hence the NULL
 * check and -ENOMEM return -- followed by the kerneldoc header and opening
 * of i915_driver_load(), whose declaration continues on the next span.
 */
1250 */ 1251 ret = i915_gem_init_stolen(dev); 1252 if (ret) 1253 goto cleanup_vga_switcheroo; 1254 1255 intel_modeset_init(dev); 1256 1257 ret = i915_gem_init(dev); 1258 if (ret) 1259 goto cleanup_gem_stolen; 1260 1261 intel_modeset_gem_init(dev); 1262 1263 ret = drm_irq_install(dev); 1264 if (ret) 1265 goto cleanup_gem; 1266 1267 dev->vblank_disable_allowed = 1; 1268 1269 ret = intel_fbdev_init(dev); 1270 if (ret) 1271 goto cleanup_gem; 1272 1273 drm_kms_helper_poll_init(dev); 1274 1275 /* We're off and running w/KMS */ 1276 dev_priv->mm.suspended = 0; 1277 1278 return 0; 1279 1280cleanup_gem: 1281 DRM_LOCK(dev); 1282 i915_gem_cleanup_ringbuffer(dev); 1283 DRM_UNLOCK(dev); 1284 i915_gem_cleanup_aliasing_ppgtt(dev); 1285cleanup_gem_stolen: 1286 i915_gem_cleanup_stolen(dev); 1287cleanup_vga_switcheroo: 1288 return ret; 1289} 1290 1291int i915_master_create(struct drm_device *dev, struct drm_master *master) 1292{ 1293 struct drm_i915_master_private *master_priv; 1294 1295 master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA, M_NOWAIT | M_ZERO); 1296 if (!master_priv) 1297 return -ENOMEM; 1298 1299 master->driver_priv = master_priv; 1300 return 0; 1301} 1302 1303void i915_master_destroy(struct drm_device *dev, struct drm_master *master) 1304{ 1305 struct drm_i915_master_private *master_priv = master->driver_priv; 1306 1307 if (!master_priv) 1308 return; 1309 1310 free(master_priv, DRM_MEM_DMA); 1311 1312 master->driver_priv = NULL; 1313} 1314 1315/** 1316 * i915_driver_load - setup chip and create an initial config 1317 * @dev: DRM device 1318 * @flags: startup flags 1319 * 1320 * The driver load routine has to do several things: 1321 * - drive output discovery via intel_modeset_init() 1322 * - initialize the memory manager 1323 * - allocate initial config memory 1324 * - setup the DRM framebuffer with the allocated memory 1325 */ 1326int i915_driver_load(struct drm_device *dev, unsigned long flags) 1327{ 1328 struct drm_i915_private *dev_priv; 1329 const struct
/*
 * Middle of i915_driver_load() (opened on the previous span): looks up the
 * static device-info table by PCI id, refuses gen6+ without KMS, registers
 * four extra DRM statistics counters, allocates dev_priv with
 * M_WAITOK | M_ZERO (cannot fail), finds the host bridge, and maps the MMIO
 * BAR -- BAR 1 on gen2, BAR 0 otherwise.  It then creates the driver
 * taskqueue and the gt/error/error-completion/rps/dpio mutexes and performs
 * IRQ, MCHBAR, GMBUS, opregion and BIOS setup plus i915_gem_load().  Error
 * paths up to this point free dev_priv and return directly.  Continues on
 * the next span.
 */
intel_device_info *info; 1330 unsigned long base, size; 1331 int ret = 0, mmio_bar; 1332 1333 info = i915_get_device_id(dev->pci_device); 1334 1335 /* Refuse to load on gen6+ without kms enabled. */ 1336 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1337 return -ENODEV; 1338 1339 /* i915 has 4 more counters */ 1340 dev->counters += 4; 1341 dev->types[6] = _DRM_STAT_IRQ; 1342 dev->types[7] = _DRM_STAT_PRIMARY; 1343 dev->types[8] = _DRM_STAT_SECONDARY; 1344 dev->types[9] = _DRM_STAT_DMA; 1345 1346 dev_priv = malloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER, 1347 M_WAITOK | M_ZERO); 1348 1349 dev->dev_private = (void *)dev_priv; 1350 dev_priv->dev = dev; 1351 dev_priv->info = info; 1352 1353 if (i915_get_bridge_dev(dev)) { 1354 free(dev_priv, DRM_MEM_DRIVER); 1355 return -EIO; 1356 } 1357 dev_priv->mm.gtt = intel_gtt_get(); 1358 1359 /* Add register map (needed for suspend/resume) */ 1360 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1361 base = drm_get_resource_start(dev, mmio_bar); 1362 size = drm_get_resource_len(dev, mmio_bar); 1363 1364 ret = drm_addmap(dev, 1365 base, size, 1366 _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map); 1367 if (ret != 0) { 1368 DRM_ERROR("Failed to allocate mmio_map: %d\n", ret); 1369 free(dev_priv, DRM_MEM_DRIVER); 1370 return ret; 1371 } 1372 1373 dev_priv->tq = taskqueue_create("915", M_WAITOK, 1374 taskqueue_thread_enqueue, &dev_priv->tq); 1375 taskqueue_start_threads(&dev_priv->tq, 1, PWAIT, "i915 taskq"); 1376 mtx_init(&dev_priv->gt_lock, "915gt", NULL, MTX_DEF); 1377 mtx_init(&dev_priv->error_lock, "915err", NULL, MTX_DEF); 1378 mtx_init(&dev_priv->error_completion_lock, "915cmp", NULL, MTX_DEF); 1379 mtx_init(&dev_priv->rps_lock, "915rps", NULL, MTX_DEF); 1380 mtx_init(&dev_priv->dpio_lock, "915dpi", NULL, MTX_DEF); 1381 1382 intel_irq_init(dev); 1383 1384 intel_setup_mchbar(dev); 1385 intel_setup_gmbus(dev); 1386 intel_opregion_setup(dev); 1387 1388 intel_setup_bios(dev); 1389 1390 i915_gem_load(dev);
/*
 * Tail of i915_driver_load() (continued from the previous span): MSI is
 * skipped on 945G/GM per the chipset erratum described in the comment
 * below; the legacy physical hardware status page is set up only when the
 * chip has no GFX HWS; pipe count is 3 on IVB/HSW, 2 on mobile or any
 * non-gen2, else 1.  After vblank init the device starts with
 * mm.suspended = 1, then PCH detection, optional KMS init, bus mastering,
 * opregion init and the hangcheck callout are brought up.  All late
 * failures funnel to out_gem_unload, which reuses i915_driver_unload()
 * for teardown (return value deliberately ignored, see XXXKIB).
 */
1391 1392 /* On the 945G/GM, the chipset reports the MSI capability on the 1393 * integrated graphics even though the support isn't actually there 1394 * according to the published specs. It doesn't appear to function 1395 * correctly in testing on 945G. 1396 * This may be a side effect of MSI having been made available for PEG 1397 * and the registers being closely associated. 1398 * 1399 * According to chipset errata, on the 965GM, MSI interrupts may 1400 * be lost or delayed, but we use them anyways to avoid 1401 * stuck interrupts on some machines. 1402 */ 1403 if (!IS_I945G(dev) && !IS_I945GM(dev)) 1404 drm_pci_enable_msi(dev); 1405 1406 /* Init HWS */ 1407 if (!I915_NEED_GFX_HWS(dev)) { 1408 ret = i915_init_phys_hws(dev); 1409 if (ret != 0) { 1410 drm_rmmap(dev, dev_priv->mmio_map); 1411 free(dev_priv, DRM_MEM_DRIVER); 1412 return ret; 1413 } 1414 } 1415 1416 mtx_init(&dev_priv->irq_lock, "userirq", NULL, MTX_DEF); 1417 1418 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1419 dev_priv->num_pipe = 3; 1420 else if (IS_MOBILE(dev) || !IS_GEN2(dev)) 1421 dev_priv->num_pipe = 2; 1422 else 1423 dev_priv->num_pipe = 1; 1424 1425 ret = drm_vblank_init(dev, dev_priv->num_pipe); 1426 if (ret) 1427 goto out_gem_unload; 1428 1429 /* Start out suspended */ 1430 dev_priv->mm.suspended = 1; 1431 1432 intel_detect_pch(dev); 1433 1434 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1435 ret = i915_load_modeset_init(dev); 1436 if (ret < 0) { 1437 DRM_ERROR("failed to init modeset\n"); 1438 goto out_gem_unload; 1439 } 1440 } 1441 1442 pci_enable_busmaster(dev->dev); 1443 1444 intel_opregion_init(dev); 1445 1446 callout_init(&dev_priv->hangcheck_timer, 1); 1447 callout_reset(&dev_priv->hangcheck_timer, DRM_I915_HANGCHECK_PERIOD, 1448 i915_hangcheck_elapsed, dev); 1449 1450 if (IS_GEN5(dev)) 1451 intel_gpu_ips_init(dev_priv); 1452 1453 return 0; 1454 1455out_gem_unload: 1456 /* XXXKIB */ 1457 (void) i915_driver_unload(dev); 1458 return (ret); 1459} 1460 1461int
/*
 * i915_driver_unload() (declaration opened on the previous span): idles
 * the GPU under DRM_LOCK (an idle failure is only logged), frees the HWS,
 * tears down MCHBAR, and for KMS additionally unwinds fbdev/modeset, phys
 * objects, ringbuffers, contexts, the aliasing PPGTT and the
 * stolen-memory allocator (FBC cleanup is stubbed via KIB_NOTYET).  The
 * hangcheck callout is stopped AND drained before error state is
 * destroyed, so the timer cannot fire during teardown.  Finishes by
 * destroying the remaining mutexes and freeing dev_priv; always returns 0.
 * Also begins i915_driver_open(), which continues on the next span.
 */
i915_driver_unload(struct drm_device *dev) 1462{ 1463 struct drm_i915_private *dev_priv = dev->dev_private; 1464 int ret; 1465 1466 DRM_LOCK(dev); 1467 ret = i915_gpu_idle(dev); 1468 if (ret) 1469 DRM_ERROR("failed to idle hardware: %d\n", ret); 1470 i915_gem_retire_requests(dev); 1471 DRM_UNLOCK(dev); 1472 1473 i915_free_hws(dev); 1474 1475 intel_teardown_mchbar(dev); 1476 1477 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1478 intel_fbdev_fini(dev); 1479 intel_modeset_cleanup(dev); 1480 } 1481 1482 /* Free error state after interrupts are fully disabled. */ 1483 callout_stop(&dev_priv->hangcheck_timer); 1484 callout_drain(&dev_priv->hangcheck_timer); 1485 1486 i915_destroy_error_state(dev); 1487 1488 if (dev->msi_enabled) 1489 drm_pci_disable_msi(dev); 1490 1491 intel_opregion_fini(dev); 1492 1493 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1494 DRM_LOCK(dev); 1495 i915_gem_free_all_phys_object(dev); 1496 i915_gem_cleanup_ringbuffer(dev); 1497 i915_gem_context_fini(dev); 1498 DRM_UNLOCK(dev); 1499 i915_gem_cleanup_aliasing_ppgtt(dev); 1500#if 1 1501 KIB_NOTYET(); 1502#else 1503 if (I915_HAS_FBC(dev) && i915_powersave) 1504 i915_cleanup_compression(dev); 1505#endif 1506 drm_mm_takedown(&dev_priv->mm.stolen); 1507 1508 intel_cleanup_overlay(dev); 1509 1510 if (!I915_NEED_GFX_HWS(dev)) 1511 i915_free_hws(dev); 1512 } 1513 1514 i915_gem_unload(dev); 1515 1516 mtx_destroy(&dev_priv->irq_lock); 1517 1518 if (dev_priv->tq != NULL) 1519 taskqueue_free(dev_priv->tq); 1520 1521 bus_generic_detach(dev->dev); 1522 drm_rmmap(dev, dev_priv->mmio_map); 1523 intel_teardown_gmbus(dev); 1524 1525 mtx_destroy(&dev_priv->dpio_lock); 1526 mtx_destroy(&dev_priv->error_lock); 1527 mtx_destroy(&dev_priv->error_completion_lock); 1528 mtx_destroy(&dev_priv->rps_lock); 1529 free(dev->dev_private, DRM_MEM_DRIVER); 1530 1531 return 0; 1532} 1533 1534int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1535{ 1536 struct drm_i915_file_private *file_priv; 1537 1538
/*
 * Completion of i915_driver_open() (opened on the previous span):
 * allocates per-file private data with M_WAITOK | M_ZERO (cannot return
 * NULL, so no check), initializes its request-list lock and list, and the
 * GEM context name table.  Followed by the kerneldoc for
 * i915_driver_lastclose() and its opening lines, which continue on the
 * next span.
 */
file_priv = malloc(sizeof(*file_priv), DRM_MEM_FILES, M_WAITOK | M_ZERO); 1539 1540 file->driver_priv = file_priv; 1541 1542 mtx_init(&file_priv->mm.lck, "915fp", NULL, MTX_DEF); 1543 INIT_LIST_HEAD(&file_priv->mm.request_list); 1544 1545 drm_gem_names_init(&file_priv->context_idr); 1546 1547 return 0; 1548} 1549 1550/** 1551 * i915_driver_lastclose - clean up after all DRM clients have exited 1552 * @dev: DRM device 1553 * 1554 * Take care of cleaning up after all DRM clients have exited. In the 1555 * mode setting case, we want to restore the kernel's initial mode (just 1556 * in case the last client left us in a bad state). 1557 * 1558 * Additionally, in the non-mode setting case, we'll tear down the GTT 1559 * and DMA structures, since the kernel won't be using them, and clean 1560 * up any GEM state. 1561 */ 1562void i915_driver_lastclose(struct drm_device * dev) 1563{ 1564 drm_i915_private_t *dev_priv = dev->dev_private; 1565 1566 /* On gen6+ we refuse to init without kms enabled, but then the drm core 1567 * goes right around and calls lastclose. Check for this and don't clean 1568 * up anything.
/*
 * Completion of i915_driver_lastclose() (opened on the previous span):
 * bails out when dev_priv is NULL -- gen6+ refused load without KMS yet
 * the DRM core still calls lastclose; for KMS the restore path is stubbed
 * out (KIB_NOTYET), otherwise GEM and legacy DMA state are torn down.
 * i915_driver_preclose() and i915_driver_postclose() release per-file
 * context and GEM state and free the per-file private data.  The
 * i915_ioctls[] table begins here: legacy DRI1 entries, mostly DRM_AUTH,
 * with master + root additionally required for the state-changing ones;
 * the table continues on the next spans.
 */
*/ 1569 if (!dev_priv) 1570 return; 1571 1572 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1573#if 1 1574 KIB_NOTYET(); 1575#else 1576 drm_fb_helper_restore(); 1577 vga_switcheroo_process_delayed_switch(); 1578#endif 1579 return; 1580 } 1581 1582 i915_gem_lastclose(dev); 1583 1584 i915_dma_cleanup(dev); 1585} 1586 1587void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1588{ 1589 i915_gem_context_close(dev, file_priv); 1590 i915_gem_release(dev, file_priv); 1591} 1592 1593void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1594{ 1595 struct drm_i915_file_private *file_priv = file->driver_priv; 1596 1597 mtx_destroy(&file_priv->mm.lck); 1598 free(file_priv, DRM_MEM_FILES); 1599} 1600 1601struct drm_ioctl_desc i915_ioctls[] = { 1602 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1603 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 1604 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), 1605 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 1606 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 1607 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 1608 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), 1609 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1610 DRM_IOCTL_DEF(DRM_I915_ALLOC, drm_noop, DRM_AUTH), 1611 DRM_IOCTL_DEF(DRM_I915_FREE, drm_noop, DRM_AUTH), 1612 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1613 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 1614 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 1615 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 1616 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 1617 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 1618 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR,
/*
 * i915_ioctls[] continued (table opened on the previous span): the GEM
 * entry points.  These are all DRM_UNLOCKED -- GEM does its own locking --
 * and init/pin/unpin/entervt/leavevt additionally require root and/or
 * master.  Continues on the next span.
 */
i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1619 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1620 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED), 1621 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED), 1622 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1623 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1624 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 1625 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 1626 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1627 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1628 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), 1629 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), 1630 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), 1631 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), 1632 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), 1633 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), 1634 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), 1635 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), 1636 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), 1637 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), 1638 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 1639 DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 1640 DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE,
/*
 * i915_ioctls[] tail (table opened on previous spans): overlay/sprite
 * ioctls require master or a control node, GEM context create/destroy are
 * plain unlocked entries.  Followed by the optional 32-bit compat table
 * declarations (COMPAT_FREEBSD32) and the beginning of the drm_driver
 * descriptor i915_driver_info, which wires all the i915_driver_* entry
 * points defined above into the DRM core; continues on the next span.
 */
intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1641 DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1642 DRM_IOCTL_DEF(DRM_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1643 DRM_IOCTL_DEF(DRM_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 1644 DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), 1645 DRM_IOCTL_DEF(DRM_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), 1646}; 1647 1648#ifdef COMPAT_FREEBSD32 1649extern struct drm_ioctl_desc i915_compat_ioctls[]; 1650extern int i915_compat_ioctls_nr; 1651#endif 1652 1653struct drm_driver i915_driver_info = { 1654 /* 1655 * FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on 1656 * Linux. 1657 */ 1658 .driver_features = 1659 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 1660 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, 1661 1662 .buf_priv_size = sizeof(drm_i915_private_t), 1663 .load = i915_driver_load, 1664 .open = i915_driver_open, 1665 .unload = i915_driver_unload, 1666 .preclose = i915_driver_preclose, 1667 .lastclose = i915_driver_lastclose, 1668 .postclose = i915_driver_postclose, 1669 .device_is_agp = i915_driver_device_is_agp, 1670 .master_create = i915_master_create, 1671 .master_destroy = i915_master_destroy, 1672 .gem_init_object = i915_gem_init_object, 1673 .gem_free_object = i915_gem_free_object, 1674 .gem_pager_ops = &i915_gem_pager_ops, 1675 .dumb_create = i915_gem_dumb_create, 1676 .dumb_map_offset = i915_gem_mmap_gtt, 1677 .dumb_destroy = i915_gem_dumb_destroy, 1678 .sysctl_init = i915_sysctl_init, 1679 .sysctl_cleanup = i915_sysctl_cleanup, 1680 1681 .ioctls = i915_ioctls, 1682#ifdef COMPAT_FREEBSD32 1683 .compat_ioctls = i915_compat_ioctls, 1684 .num_compat_ioctls = &i915_compat_ioctls_nr, 1685#endif 1686 .num_ioctls =
/*
 * Tail of the i915_driver_info descriptor (ioctl count and name/version
 * fields) and i915_driver_device_is_agp(): unconditionally claims the
 * device is AGP because, as the comment in the code explains, old
 * userspace abused the Linux AGP interface to manage the GTT, and the DRM
 * core refuses to initialize AGP support unless the driver claims it.
 */
ARRAY_SIZE(i915_ioctls), 1687 1688 .name = DRIVER_NAME, 1689 .desc = DRIVER_DESC, 1690 .date = DRIVER_DATE, 1691 .major = DRIVER_MAJOR, 1692 .minor = DRIVER_MINOR, 1693 .patchlevel = DRIVER_PATCHLEVEL, 1694}; 1695 1696/* 1697 * This is really ugly: Because old userspace abused the linux agp interface to 1698 * manage the gtt, we need to claim that all intel devices are agp. For 1699 * otherwise the drm core refuses to initialize the agp support code. 1700 */ 1701int i915_driver_device_is_agp(struct drm_device * dev) 1702{ 1703 return 1; 1704} 1705