/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <dev/drm2/i915/intel_ringbuffer.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

void
i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_i915_private *dev_priv;

	if (ring->trace_irq_seqno == 0) {
		dev_priv = ring->dev->dev_private;
		mtx_lock(&dev_priv->irq_lock);
		if (ring->irq_get(ring))
			ring->trace_irq_seqno = seqno;
		mtx_unlock(&dev_priv->irq_lock);
	}
}

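/*
 * Free space in the ring, in bytes.  Eight bytes are held back so the
 * tail can never advance to exactly match the head; a completely full
 * ring would otherwise be indistinguishable from an empty one.
 */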
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

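/*
 * Gen6 render-ring flush: the mandatory post-sync workaround sequence
 * above, followed by one PIPE_CONTROL that flushes and invalidates all
 * render caches at once.
 */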
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    uint32_t value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
	    RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

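/*
 * Reset a ring to a known state: stop it, program the start address and
 * control register, then verify that the hardware cleared the head
 * pointer and marked the ring valid.  Fails with -EIO on a dead ring.
 */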
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	uint32_t head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG("%s head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (_intel_wait_for(ring->dev,
	    (I915_READ_CTL(ring) & RING_VALID) != 0 &&
	     I915_READ_START(ring) == obj->gtt_offset &&
	    (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
	    50, 1, "915rii")) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}

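/*
 * Allocate and pin the scratch page targeted by PIPE_CONTROL writes,
 * and map it into kernel virtual address space so the CPU can read back
 * values the GPU stores there (e.g. the seqno written by
 * pc_render_add_request()).
 */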
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	free(pc, DRM_I915_GEM);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	pmap_qremove((vm_offset_t)pc->cpu_page, 1);
	kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	free(pc, DRM_I915_GEM);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * see intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

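/*
 * Emit an MI_SEMAPHORE_MBOX update that stores @seqno into another
 * ring's semaphore mailbox register, so that ring can wait on it in
 * gen6_ring_sync().
 */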
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_next_request_seqno(ring);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed.  However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	if (signaller->semaphore_register[waiter->id] ==
	    MI_SEMAPHORE_SYNC_INVALID)
		printf("gen6_ring_sync semaphore_register %d invalid\n",
		    waiter->id);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter,
			dw1 | signaller->semaphore_register[waiter->id]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

int render_ring_sync_to(struct intel_ring_buffer *waiter,
    struct intel_ring_buffer *signaller, u32 seqno);
int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
    struct intel_ring_buffer *signaller, u32 seqno);
int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
    struct intel_ring_buffer *signaller, u32 seqno);

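/*
 * Emit a qword PIPE_CONTROL write with a depth stall.
 * pc_render_add_request() below uses a run of these, each targeting a
 * different scratch cacheline, to flush prior writes out to memory.
 */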
#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      uint32_t *result)
{
	u32 seqno = i915_gem_next_request_seqno(ring);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (/* IS_GEN6(dev) || */IS_GEN7(dev))
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static uint32_t
ring_get_seqno(struct intel_ring_buffer *ring)
{
	if (ring->status_page.page_addr == NULL)
		return (-1);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static uint32_t
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	if (pc != NULL)
		return pc->cpu_page[0];
	else
		return (-1);
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (ring->irq_refcount++ == 0) {
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (--ring->irq_refcount == 0) {
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
}

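/*
 * Tell the hardware where this ring's status page lives in the GTT;
 * the register used varies by generation and ring.
 */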
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_next_request_seqno(ring);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	gen6_gt_force_wake_get(dev_priv);

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}

	gen6_gt_force_wake_put(dev_priv);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_advance(ring);

	return 0;
}

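/*
 * The hardware status page is a single page the GPU writes seqnos and
 * other state into (e.g. via MI_STORE_DWORD_INDEX).  These helpers
 * manage its backing object and the kernel mapping used to read it.
 */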
static void
cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
	kva_free((vm_offset_t)ring->status_page.page_addr,
	    PAGE_SIZE);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
	    1);
	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n",
	    ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->size = 32 * PAGE_SIZE;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->virtual_start = pmap_mapdev_attr(
	    dev->agp->base + obj->gtt_offset, ring->size,
	    VM_MEMATTR_WRITE_COMBINING);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer.  The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	I915_WRITE_CTL(ring, 0);

	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

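/*
 * When a request would run past the end of the ring, pad the remainder
 * with MI_NOOPs and wrap the tail back to the start so the commands are
 * emitted contiguously.
 */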
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (uint32_t *)((char *)ring->virtual_start + ring->tail);
	rem /= 4;
	while (rem--)
		*virt++ = MI_NOOP;

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool was_interruptible;
	int ret;

	/* XXX As we have not yet audited all the paths to check that
	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
	 * allow us to be interruptible by a signal.
	 */
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_wait_request(ring, seqno);

	dev_priv->mm.interruptible = was_interruptible;
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

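/*
 * Try to free at least @n bytes of ring space by retiring completed
 * requests; failing that, wait on the oldest outstanding request whose
 * completion would release enough space.
 */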
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + 8);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD.  It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (ring->last_retired_head == -1)
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (ring->space < n)
		return -ENOSPC;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	CTR1(KTR_DRM, "ring_wait_begin %s", ring->name);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths).  But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = ticks + hz * 60;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			CTR1(KTR_DRM, "ring_wait_end %s", ring->name);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		pause("915rng", 1);
		if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
			CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name);
			return -EAGAIN;
		}
	} while (!time_after(ticks, end));
	CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name);
	return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (atomic_load_acq_int(&dev_priv->mm.wedged))
		return -EIO;

	if (ring->tail + n > ring->effective_size) {
		ret = intel_wrap_ring_buffer(ring);
		if (ret != 0)
			return ret;
	}

	if (ring->space < n) {
		ret = intel_wait_ring_buffer(ring, n);
		if (ret != 0)
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (_intel_wait_for(ring->dev,
	    (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
	     GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
	    true, "915g6i") != 0)
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

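/*
 * Gen6 BSD-ring flush: a MI_FLUSH_DW, adding TLB and BSD cache
 * invalidation when any GPU domain needs to be invalidated.
 */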
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   uint32_t invalidate, uint32_t flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      uint32_t offset, uint32_t len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	u32 cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
		ring->signal_mbox[0] = GEN6_VRSYNC;
		ring->signal_mbox[1] = GEN6_BRSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

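/*
 * Legacy (DRI1/UMS) render-ring setup: the ring memory lives at a
 * caller-supplied graphics address instead of a GEM object, so this
 * only maps it and fills in the function table.  Gen6+ is KMS-only.
 */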
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV).  Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = pmap_mapdev_attr(start, size,
	    VM_MEMATTR_WRITE_COMBINING);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	return 0;
}

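/*
 * The BSD (video) ring.  Gen6 parts must use the special tail-write
 * sequence in gen6_bsd_ring_write_tail() above; everything else uses
 * the plain tail write.
 */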
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->rings[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
		ring->signal_mbox[0] = GEN6_RVSYNC;
		ring->signal_mbox[1] = GEN6_BVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = blt_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[0] = GEN6_RBSYNC;
	ring->signal_mbox[1] = GEN6_VBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}