1235783Skib/* 2235783Skib * Copyright �� 2008-2010 Intel Corporation 3235783Skib * 4235783Skib * Permission is hereby granted, free of charge, to any person obtaining a 5235783Skib * copy of this software and associated documentation files (the "Software"), 6235783Skib * to deal in the Software without restriction, including without limitation 7235783Skib * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8235783Skib * and/or sell copies of the Software, and to permit persons to whom the 9235783Skib * Software is furnished to do so, subject to the following conditions: 10235783Skib * 11235783Skib * The above copyright notice and this permission notice (including the next 12235783Skib * paragraph) shall be included in all copies or substantial portions of the 13235783Skib * Software. 14235783Skib * 15235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16235783Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17235783Skib * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18235783Skib * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19235783Skib * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20235783Skib * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21235783Skib * IN THE SOFTWARE. 22235783Skib * 23235783Skib * Authors: 24235783Skib * Eric Anholt <eric@anholt.net> 25235783Skib * Zou Nan hai <nanhai.zou@intel.com> 26235783Skib * Xiang Hai hao<haihao.xiang@intel.com> 27235783Skib * 28235783Skib */ 29235783Skib 30235783Skib#include <sys/cdefs.h> 31235783Skib__FBSDID("$FreeBSD$"); 32235783Skib 33235783Skib#include <dev/drm2/drmP.h> 34235783Skib#include <dev/drm2/drm.h> 35235783Skib#include <dev/drm2/i915/i915_drm.h> 36235783Skib#include <dev/drm2/i915/i915_drv.h> 37235783Skib#include <dev/drm2/i915/intel_drv.h> 38235783Skib#include <dev/drm2/i915/intel_ringbuffer.h> 39235783Skib#include <sys/sched.h> 40235783Skib#include <sys/sf_buf.h> 41235783Skib 42235783Skib/* 43235783Skib * 965+ support PIPE_CONTROL commands, which provide finer grained control 44235783Skib * over cache flushing. 45235783Skib */ 46235783Skibstruct pipe_control { 47235783Skib struct drm_i915_gem_object *obj; 48235783Skib volatile u32 *cpu_page; 49235783Skib u32 gtt_offset; 50235783Skib}; 51235783Skib 52235783Skibvoid 53235783Skibi915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno) 54235783Skib{ 55280369Skib struct drm_i915_private *dev_priv; 56235783Skib 57235783Skib if (ring->trace_irq_seqno == 0) { 58280369Skib dev_priv = ring->dev->dev_private; 59280369Skib mtx_lock(&dev_priv->irq_lock); 60235783Skib if (ring->irq_get(ring)) 61235783Skib ring->trace_irq_seqno = seqno; 62280369Skib mtx_unlock(&dev_priv->irq_lock); 63235783Skib } 64235783Skib} 65235783Skib 66235783Skibstatic inline int ring_space(struct intel_ring_buffer *ring) 67235783Skib{ 68235783Skib int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); 69235783Skib if (space < 0) 70235783Skib space += ring->size; 71235783Skib return space; 72235783Skib} 73235783Skib 74235783Skibstatic int 75280369Skibgen2_render_ring_flush(struct intel_ring_buffer *ring, 76280369Skib u32 invalidate_domains, 77280369Skib u32 flush_domains) 78235783Skib{ 79280369Skib u32 cmd; 80280369Skib int ret; 81280369Skib 82280369Skib cmd = MI_FLUSH; 83280369Skib if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) 84280369Skib cmd |= MI_NO_WRITE_FLUSH; 85280369Skib 86280369Skib if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 87280369Skib cmd |= MI_READ_FLUSH; 88280369Skib 89280369Skib ret = intel_ring_begin(ring, 2); 90280369Skib if (ret) 91280369Skib return ret; 92280369Skib 93280369Skib intel_ring_emit(ring, cmd); 94280369Skib intel_ring_emit(ring, MI_NOOP); 95280369Skib intel_ring_advance(ring); 96280369Skib 97280369Skib return 0; 98280369Skib} 99280369Skib 100280369Skibstatic int 101280369Skibgen4_render_ring_flush(struct intel_ring_buffer *ring, 102280369Skib u32 invalidate_domains, 103280369Skib u32 flush_domains) 104280369Skib{ 105235783Skib struct drm_device *dev = ring->dev; 106235783Skib uint32_t cmd; 107235783Skib int ret; 108235783Skib 109235783Skib /* 110235783Skib * read/write caches: 111235783Skib * 112235783Skib * I915_GEM_DOMAIN_RENDER is always invalidated, but is 113235783Skib * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 114235783Skib * also flushed at 2d versus 3d pipeline switches. 115235783Skib * 116235783Skib * read-only caches: 117235783Skib * 118235783Skib * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 119235783Skib * MI_READ_FLUSH is set, and is always flushed on 965. 120235783Skib * 121235783Skib * I915_GEM_DOMAIN_COMMAND may not exist? 122235783Skib * 123235783Skib * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 124235783Skib * invalidated when MI_EXE_FLUSH is set. 125235783Skib * 126235783Skib * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 127235783Skib * invalidated with every MI_FLUSH. 128235783Skib * 129235783Skib * TLBs: 130235783Skib * 131235783Skib * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 132235783Skib * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 133235783Skib * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 134235783Skib * are flushed at any MI_FLUSH. 135235783Skib */ 136235783Skib 137235783Skib cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 138280369Skib if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) 139235783Skib cmd &= ~MI_NO_WRITE_FLUSH; 140235783Skib if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 141235783Skib cmd |= MI_EXE_FLUSH; 142235783Skib 143235783Skib if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 144235783Skib (IS_G4X(dev) || IS_GEN5(dev))) 145235783Skib cmd |= MI_INVALIDATE_ISP; 146235783Skib 147235783Skib ret = intel_ring_begin(ring, 2); 148235783Skib if (ret) 149235783Skib return ret; 150235783Skib 151235783Skib intel_ring_emit(ring, cmd); 152235783Skib intel_ring_emit(ring, MI_NOOP); 153235783Skib intel_ring_advance(ring); 154235783Skib 155235783Skib return 0; 156235783Skib} 157235783Skib 158235783Skib/** 159235783Skib * Emits a PIPE_CONTROL with a non-zero post-sync operation, for 160235783Skib * implementing two workarounds on gen6. From section 1.4.7.1 161235783Skib * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: 162235783Skib * 163235783Skib * [DevSNB-C+{W/A}] Before any depth stall flush (including those 164235783Skib * produced by non-pipelined state commands), software needs to first 165235783Skib * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 166235783Skib * 0. 167235783Skib * 168235783Skib * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable 169235783Skib * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. 170235783Skib * 171235783Skib * And the workaround for these two requires this workaround first: 172235783Skib * 173235783Skib * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent 174235783Skib * BEFORE the pipe-control with a post-sync op and no write-cache 175235783Skib * flushes. 176235783Skib * 177235783Skib * And this last workaround is tricky because of the requirements on 178235783Skib * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM 179235783Skib * volume 2 part 1: 180235783Skib * 181235783Skib * "1 of the following must also be set: 182235783Skib * - Render Target Cache Flush Enable ([12] of DW1) 183235783Skib * - Depth Cache Flush Enable ([0] of DW1) 184235783Skib * - Stall at Pixel Scoreboard ([1] of DW1) 185235783Skib * - Depth Stall ([13] of DW1) 186235783Skib * - Post-Sync Operation ([13] of DW1) 187235783Skib * - Notify Enable ([8] of DW1)" 188235783Skib * 189235783Skib * The cache flushes require the workaround flush that triggered this 190235783Skib * one, so we can't use it. Depth stall would trigger the same. 191235783Skib * Post-sync nonzero is what triggered this second workaround, so we 192235783Skib * can't use that one either. Notify enable is IRQs, which aren't 193235783Skib * really our business. That leaves only stall at scoreboard. 194235783Skib */ 195235783Skibstatic int 196235783Skibintel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 197235783Skib{ 198235783Skib struct pipe_control *pc = ring->private; 199235783Skib u32 scratch_addr = pc->gtt_offset + 128; 200235783Skib int ret; 201235783Skib 202235783Skib 203235783Skib ret = intel_ring_begin(ring, 6); 204235783Skib if (ret) 205235783Skib return ret; 206235783Skib 207235783Skib intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 208235783Skib intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | 209235783Skib PIPE_CONTROL_STALL_AT_SCOREBOARD); 210235783Skib intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 211235783Skib intel_ring_emit(ring, 0); /* low dword */ 212235783Skib intel_ring_emit(ring, 0); /* high dword */ 213235783Skib intel_ring_emit(ring, MI_NOOP); 214235783Skib intel_ring_advance(ring); 215235783Skib 216235783Skib ret = intel_ring_begin(ring, 6); 217235783Skib if (ret) 218235783Skib return ret; 219235783Skib 220235783Skib intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 221235783Skib intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); 222235783Skib intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 223235783Skib intel_ring_emit(ring, 0); 224235783Skib intel_ring_emit(ring, 0); 225235783Skib intel_ring_emit(ring, MI_NOOP); 226235783Skib intel_ring_advance(ring); 227235783Skib 228235783Skib return 0; 229235783Skib} 230235783Skib 231235783Skibstatic int 232235783Skibgen6_render_ring_flush(struct intel_ring_buffer *ring, 233235783Skib u32 invalidate_domains, u32 flush_domains) 234235783Skib{ 235235783Skib u32 flags = 0; 236235783Skib struct pipe_control *pc = ring->private; 237235783Skib u32 scratch_addr = pc->gtt_offset + 128; 238235783Skib int ret; 239235783Skib 240235783Skib /* Force SNB workarounds for PIPE_CONTROL flushes */ 241235783Skib intel_emit_post_sync_nonzero_flush(ring); 242235783Skib 243235783Skib /* Just flush everything. Experiments have shown that reducing the 244235783Skib * number of bits based on the write domains has little performance 245235783Skib * impact. 246235783Skib */ 247235783Skib flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 248235783Skib flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 249235783Skib flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 250235783Skib flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 251235783Skib flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 252235783Skib flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 253235783Skib flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 254235783Skib 255235783Skib ret = intel_ring_begin(ring, 6); 256235783Skib if (ret) 257235783Skib return ret; 258235783Skib 259235783Skib intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 260235783Skib intel_ring_emit(ring, flags); 261235783Skib intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 262235783Skib intel_ring_emit(ring, 0); /* lower dword */ 263235783Skib intel_ring_emit(ring, 0); /* uppwer dword */ 264235783Skib intel_ring_emit(ring, MI_NOOP); 265235783Skib intel_ring_advance(ring); 266235783Skib 267235783Skib return 0; 268235783Skib} 269235783Skib 270235783Skibstatic void ring_write_tail(struct intel_ring_buffer *ring, 271235783Skib uint32_t value) 272235783Skib{ 273235783Skib drm_i915_private_t *dev_priv = ring->dev->dev_private; 274235783Skib I915_WRITE_TAIL(ring, value); 275235783Skib} 276235783Skib 277235783Skibu32 intel_ring_get_active_head(struct intel_ring_buffer *ring) 278235783Skib{ 279235783Skib drm_i915_private_t *dev_priv = ring->dev->dev_private; 280235783Skib uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? 281235783Skib RING_ACTHD(ring->mmio_base) : ACTHD; 282235783Skib 283235783Skib return I915_READ(acthd_reg); 284235783Skib} 285235783Skib 286235783Skibstatic int init_ring_common(struct intel_ring_buffer *ring) 287235783Skib{ 288235783Skib drm_i915_private_t *dev_priv = ring->dev->dev_private; 289235783Skib struct drm_i915_gem_object *obj = ring->obj; 290235783Skib uint32_t head; 291235783Skib 292235783Skib /* Stop the ring if it's running. */ 293235783Skib I915_WRITE_CTL(ring, 0); 294235783Skib I915_WRITE_HEAD(ring, 0); 295235783Skib ring->write_tail(ring, 0); 296235783Skib 297235783Skib /* Initialize the ring. */ 298235783Skib I915_WRITE_START(ring, obj->gtt_offset); 299235783Skib head = I915_READ_HEAD(ring) & HEAD_ADDR; 300235783Skib 301235783Skib /* G45 ring initialization fails to reset head to zero */ 302235783Skib if (head != 0) { 303235783Skib DRM_DEBUG("%s head not reset to zero " 304235783Skib "ctl %08x head %08x tail %08x start %08x\n", 305235783Skib ring->name, 306235783Skib I915_READ_CTL(ring), 307235783Skib I915_READ_HEAD(ring), 308235783Skib I915_READ_TAIL(ring), 309235783Skib I915_READ_START(ring)); 310235783Skib 311235783Skib I915_WRITE_HEAD(ring, 0); 312235783Skib 313235783Skib if (I915_READ_HEAD(ring) & HEAD_ADDR) { 314235783Skib DRM_ERROR("failed to set %s head to zero " 315235783Skib "ctl %08x head %08x tail %08x start %08x\n", 316235783Skib ring->name, 317235783Skib I915_READ_CTL(ring), 318235783Skib I915_READ_HEAD(ring), 319235783Skib I915_READ_TAIL(ring), 320235783Skib I915_READ_START(ring)); 321235783Skib } 322235783Skib } 323235783Skib 324235783Skib I915_WRITE_CTL(ring, 325235783Skib ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 326235783Skib | RING_VALID); 327235783Skib 328235783Skib /* If the head is still not zero, the ring is dead */ 329235783Skib if (_intel_wait_for(ring->dev, 330235783Skib (I915_READ_CTL(ring) & RING_VALID) != 0 && 331235783Skib I915_READ_START(ring) == obj->gtt_offset && 332235783Skib (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 333235783Skib 50, 1, "915rii")) { 334235783Skib DRM_ERROR("%s initialization failed " 335235783Skib "ctl %08x head %08x tail %08x start %08x\n", 336235783Skib ring->name, 337235783Skib I915_READ_CTL(ring), 338235783Skib I915_READ_HEAD(ring), 339235783Skib I915_READ_TAIL(ring), 340235783Skib I915_READ_START(ring)); 341235783Skib return -EIO; 342235783Skib } 343235783Skib 344235783Skib if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 345235783Skib i915_kernel_lost_context(ring->dev); 346235783Skib else { 347235783Skib ring->head = I915_READ_HEAD(ring); 348235783Skib ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 349235783Skib ring->space = ring_space(ring); 350235783Skib } 351235783Skib 352235783Skib return 0; 353235783Skib} 354235783Skib 355235783Skibstatic int 356235783Skibinit_pipe_control(struct intel_ring_buffer *ring) 357235783Skib{ 358235783Skib struct pipe_control *pc; 359235783Skib struct drm_i915_gem_object *obj; 360235783Skib int ret; 361235783Skib 362235783Skib if (ring->private) 363235783Skib return 0; 364235783Skib 365235783Skib pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK); 366235783Skib if (!pc) 367235783Skib return -ENOMEM; 368235783Skib 369235783Skib obj = i915_gem_alloc_object(ring->dev, 4096); 370235783Skib if (obj == NULL) { 371235783Skib DRM_ERROR("Failed to allocate seqno page\n"); 372235783Skib ret = -ENOMEM; 373235783Skib goto err; 374235783Skib } 375235783Skib 376235783Skib i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 377235783Skib 378235783Skib ret = i915_gem_object_pin(obj, 4096, true); 379235783Skib if (ret) 380235783Skib goto err_unref; 381235783Skib 382235783Skib pc->gtt_offset = obj->gtt_offset; 383254025Sjeff pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE); 384235783Skib if (pc->cpu_page == NULL) 385235783Skib goto err_unpin; 386235783Skib pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1); 387235783Skib pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page, 388273136Skib (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE); 389235783Skib 390235783Skib pc->obj = obj; 391235783Skib ring->private = pc; 392235783Skib return 0; 393235783Skib 394235783Skiberr_unpin: 395235783Skib i915_gem_object_unpin(obj); 396235783Skiberr_unref: 397235783Skib drm_gem_object_unreference(&obj->base); 398235783Skiberr: 399235783Skib free(pc, DRM_I915_GEM); 400235783Skib return ret; 401235783Skib} 402235783Skib 403235783Skibstatic void 404235783Skibcleanup_pipe_control(struct intel_ring_buffer *ring) 405235783Skib{ 406235783Skib struct pipe_control *pc = ring->private; 407235783Skib struct drm_i915_gem_object *obj; 408235783Skib 409235783Skib if (!ring->private) 410235783Skib return; 411235783Skib 412235783Skib obj = pc->obj; 413235783Skib pmap_qremove((vm_offset_t)pc->cpu_page, 1); 414254025Sjeff kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE); 415235783Skib i915_gem_object_unpin(obj); 416235783Skib drm_gem_object_unreference(&obj->base); 417235783Skib 418235783Skib free(pc, DRM_I915_GEM); 419235783Skib ring->private = NULL; 420235783Skib} 421235783Skib 422235783Skibstatic int init_render_ring(struct intel_ring_buffer *ring) 423235783Skib{ 424235783Skib struct drm_device *dev = ring->dev; 425235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 426235783Skib int ret = init_ring_common(ring); 427235783Skib 428235783Skib if (INTEL_INFO(dev)->gen > 3) { 429280369Skib I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 430235783Skib if (IS_GEN7(dev)) 431235783Skib I915_WRITE(GFX_MODE_GEN7, 432280369Skib _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 433280369Skib _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 434235783Skib } 435235783Skib 436235783Skib if (INTEL_INFO(dev)->gen >= 5) { 437235783Skib ret = init_pipe_control(ring); 438235783Skib if (ret) 439235783Skib return ret; 440235783Skib } 441235783Skib 442235783Skib 443235783Skib if (IS_GEN6(dev)) { 444235783Skib /* From the Sandybridge PRM, volume 1 part 3, page 24: 445235783Skib * "If this bit is set, STCunit will have LRA as replacement 446235783Skib * policy. [...] This bit must be reset. LRA replacement 447235783Skib * policy is not supported." 448235783Skib */ 449235783Skib I915_WRITE(CACHE_MODE_0, 450280369Skib _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 451271816Sdumbbell 452271816Sdumbbell /* This is not explicitly set for GEN6, so read the register. 453271816Sdumbbell * see intel_ring_mi_set_context() for why we care. 454271816Sdumbbell * TODO: consider explicitly setting the bit for GEN5 455271816Sdumbbell */ 456271816Sdumbbell ring->itlb_before_ctx_switch = 457271816Sdumbbell !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); 458235783Skib } 459235783Skib 460280369Skib if (INTEL_INFO(dev)->gen >= 6) 461280369Skib I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 462235783Skib 463235783Skib return ret; 464235783Skib} 465235783Skib 466235783Skibstatic void render_ring_cleanup(struct intel_ring_buffer *ring) 467235783Skib{ 468235783Skib if (!ring->private) 469235783Skib return; 470235783Skib 471235783Skib cleanup_pipe_control(ring); 472235783Skib} 473235783Skib 474235783Skibstatic void 475235783Skibupdate_mboxes(struct intel_ring_buffer *ring, 476235783Skib u32 seqno, 477235783Skib u32 mmio_offset) 478235783Skib{ 479235783Skib intel_ring_emit(ring, MI_SEMAPHORE_MBOX | 480235783Skib MI_SEMAPHORE_GLOBAL_GTT | 481235783Skib MI_SEMAPHORE_REGISTER | 482235783Skib MI_SEMAPHORE_UPDATE); 483235783Skib intel_ring_emit(ring, seqno); 484235783Skib intel_ring_emit(ring, mmio_offset); 485235783Skib} 486235783Skib 487235783Skib/** 488235783Skib * gen6_add_request - Update the semaphore mailbox registers 489235783Skib * 490235783Skib * @ring - ring that is adding a request 491235783Skib * @seqno - return seqno stuck into the ring 492235783Skib * 493235783Skib * Update the mailbox registers in the *other* rings with the current seqno. 494235783Skib * This acts like a signal in the canonical semaphore. 495235783Skib */ 496235783Skibstatic int 497235783Skibgen6_add_request(struct intel_ring_buffer *ring, 498235783Skib u32 *seqno) 499235783Skib{ 500235783Skib u32 mbox1_reg; 501235783Skib u32 mbox2_reg; 502235783Skib int ret; 503235783Skib 504235783Skib ret = intel_ring_begin(ring, 10); 505235783Skib if (ret) 506235783Skib return ret; 507235783Skib 508235783Skib mbox1_reg = ring->signal_mbox[0]; 509235783Skib mbox2_reg = ring->signal_mbox[1]; 510235783Skib 511235783Skib *seqno = i915_gem_next_request_seqno(ring); 512235783Skib 513235783Skib update_mboxes(ring, *seqno, mbox1_reg); 514235783Skib update_mboxes(ring, *seqno, mbox2_reg); 515235783Skib intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 516235783Skib intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 517235783Skib intel_ring_emit(ring, *seqno); 518235783Skib intel_ring_emit(ring, MI_USER_INTERRUPT); 519235783Skib intel_ring_advance(ring); 520235783Skib 521235783Skib return 0; 522235783Skib} 523235783Skib 524235783Skib/** 525235783Skib * intel_ring_sync - sync the waiter to the signaller on seqno 526235783Skib * 527235783Skib * @waiter - ring that is waiting 528235783Skib * @signaller - ring which has, or will signal 529235783Skib * @seqno - seqno which the waiter will block on 530235783Skib */ 531235783Skibstatic int 532280369Skibgen6_ring_sync(struct intel_ring_buffer *waiter, 533280369Skib struct intel_ring_buffer *signaller, 534280369Skib u32 seqno) 535235783Skib{ 536235783Skib int ret; 537235783Skib u32 dw1 = MI_SEMAPHORE_MBOX | 538235783Skib MI_SEMAPHORE_COMPARE | 539235783Skib MI_SEMAPHORE_REGISTER; 540235783Skib 541280369Skib /* Throughout all of the GEM code, seqno passed implies our current 542280369Skib * seqno is >= the last seqno executed. However for hardware the 543280369Skib * comparison is strictly greater than. 544280369Skib */ 545280369Skib seqno -= 1; 546280369Skib 547280369Skib if (signaller->semaphore_register[waiter->id] == 548280369Skib MI_SEMAPHORE_SYNC_INVALID) 549280369Skib printf("gen6_ring_sync semaphore_register %d invalid\n", 550280369Skib waiter->id); 551280369Skib 552235783Skib ret = intel_ring_begin(waiter, 4); 553235783Skib if (ret) 554235783Skib return ret; 555235783Skib 556280369Skib intel_ring_emit(waiter, 557280369Skib dw1 | signaller->semaphore_register[waiter->id]); 558235783Skib intel_ring_emit(waiter, seqno); 559235783Skib intel_ring_emit(waiter, 0); 560235783Skib intel_ring_emit(waiter, MI_NOOP); 561235783Skib intel_ring_advance(waiter); 562235783Skib 563235783Skib return 0; 564235783Skib} 565235783Skib 566235783Skibint render_ring_sync_to(struct intel_ring_buffer *waiter, 567235783Skib struct intel_ring_buffer *signaller, u32 seqno); 568235783Skibint gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter, 569235783Skib struct intel_ring_buffer *signaller, u32 seqno); 570235783Skibint gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, 571235783Skib struct intel_ring_buffer *signaller, u32 seqno); 572235783Skib 573235783Skib#define PIPE_CONTROL_FLUSH(ring__, addr__) \ 574235783Skibdo { \ 575235783Skib intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ 576235783Skib PIPE_CONTROL_DEPTH_STALL); \ 577235783Skib intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ 578235783Skib intel_ring_emit(ring__, 0); \ 579235783Skib intel_ring_emit(ring__, 0); \ 580235783Skib} while (0) 581235783Skib 582235783Skibstatic int 583235783Skibpc_render_add_request(struct intel_ring_buffer *ring, 584235783Skib uint32_t *result) 585235783Skib{ 586235783Skib u32 seqno = i915_gem_next_request_seqno(ring); 587235783Skib struct pipe_control *pc = ring->private; 588235783Skib u32 scratch_addr = pc->gtt_offset + 128; 589235783Skib int ret; 590235783Skib 591235783Skib /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently 592235783Skib * incoherent with writes to memory, i.e. completely fubar, 593235783Skib * so we need to use PIPE_NOTIFY instead. 594235783Skib * 595235783Skib * However, we also need to workaround the qword write 596235783Skib * incoherence by flushing the 6 PIPE_NOTIFY buffers out to 597235783Skib * memory before requesting an interrupt. 598235783Skib */ 599235783Skib ret = intel_ring_begin(ring, 32); 600235783Skib if (ret) 601235783Skib return ret; 602235783Skib 603235783Skib intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 604235783Skib PIPE_CONTROL_WRITE_FLUSH | 605235783Skib PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 606235783Skib intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 607235783Skib intel_ring_emit(ring, seqno); 608235783Skib intel_ring_emit(ring, 0); 609235783Skib PIPE_CONTROL_FLUSH(ring, scratch_addr); 610235783Skib scratch_addr += 128; /* write to separate cachelines */ 611235783Skib PIPE_CONTROL_FLUSH(ring, scratch_addr); 612235783Skib scratch_addr += 128; 613235783Skib PIPE_CONTROL_FLUSH(ring, scratch_addr); 614235783Skib scratch_addr += 128; 615235783Skib PIPE_CONTROL_FLUSH(ring, scratch_addr); 616235783Skib scratch_addr += 128; 617235783Skib PIPE_CONTROL_FLUSH(ring, scratch_addr); 618235783Skib scratch_addr += 128; 619235783Skib PIPE_CONTROL_FLUSH(ring, scratch_addr); 620235783Skib intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 621235783Skib PIPE_CONTROL_WRITE_FLUSH | 622235783Skib PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 623235783Skib PIPE_CONTROL_NOTIFY); 624235783Skib intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 625235783Skib intel_ring_emit(ring, seqno); 626235783Skib intel_ring_emit(ring, 0); 627235783Skib intel_ring_advance(ring); 628235783Skib 629235783Skib *result = seqno; 630235783Skib return 0; 631235783Skib} 632235783Skib 633280369Skibstatic u32 634235783Skibgen6_ring_get_seqno(struct intel_ring_buffer *ring) 635235783Skib{ 636235783Skib struct drm_device *dev = ring->dev; 637235783Skib 638235783Skib /* Workaround to force correct ordering between irq and seqno writes on 639235783Skib * ivb (and maybe also on snb) by reading from a CS register (like 640235783Skib * ACTHD) before reading the status page. */ 641235783Skib if (/* IS_GEN6(dev) || */IS_GEN7(dev)) 642235783Skib intel_ring_get_active_head(ring); 643235783Skib return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 644235783Skib} 645235783Skib 646235783Skibstatic uint32_t 647235783Skibring_get_seqno(struct intel_ring_buffer *ring) 648235783Skib{ 649235783Skib if (ring->status_page.page_addr == NULL) 650235783Skib return (-1); 651235783Skib return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 652235783Skib} 653235783Skib 654235783Skibstatic uint32_t 655235783Skibpc_render_get_seqno(struct intel_ring_buffer *ring) 656235783Skib{ 657235783Skib struct pipe_control *pc = ring->private; 658235783Skib if (pc != NULL) 659235783Skib return pc->cpu_page[0]; 660235783Skib else 661235783Skib return (-1); 662235783Skib} 663235783Skib 664280369Skibstatic bool 665280369Skibgen5_ring_get_irq(struct intel_ring_buffer *ring) 666235783Skib{ 667280369Skib struct drm_device *dev = ring->dev; 668280369Skib drm_i915_private_t *dev_priv = dev->dev_private; 669280369Skib 670280369Skib if (!dev->irq_enabled) 671280369Skib return false; 672280369Skib 673280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 674280369Skib if (ring->irq_refcount++ == 0) { 675280369Skib dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 676280369Skib I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 677280369Skib POSTING_READ(GTIMR); 678280369Skib } 679280369Skib 680280369Skib return true; 681235783Skib} 682235783Skib 683235783Skibstatic void 684280369Skibgen5_ring_put_irq(struct intel_ring_buffer *ring) 685235783Skib{ 686280369Skib struct drm_device *dev = ring->dev; 687280369Skib drm_i915_private_t *dev_priv = dev->dev_private; 688280369Skib 689280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 690280369Skib if (--ring->irq_refcount == 0) { 691280369Skib dev_priv->gt_irq_mask |= ring->irq_enable_mask; 692280369Skib I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 693280369Skib POSTING_READ(GTIMR); 694280369Skib } 695235783Skib} 696235783Skib 697280369Skibstatic bool 698280369Skibi9xx_ring_get_irq(struct intel_ring_buffer *ring) 699235783Skib{ 700280369Skib struct drm_device *dev = ring->dev; 701280369Skib drm_i915_private_t *dev_priv = dev->dev_private; 702280369Skib 703280369Skib if (!dev->irq_enabled) 704280369Skib return false; 705280369Skib 706280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 707280369Skib if (ring->irq_refcount++ == 0) { 708280369Skib dev_priv->irq_mask &= ~ring->irq_enable_mask; 709280369Skib I915_WRITE(IMR, dev_priv->irq_mask); 710280369Skib POSTING_READ(IMR); 711280369Skib } 712280369Skib 713280369Skib return true; 714235783Skib} 715235783Skib 716235783Skibstatic void 717280369Skibi9xx_ring_put_irq(struct intel_ring_buffer *ring) 718235783Skib{ 719280369Skib struct drm_device *dev = ring->dev; 720280369Skib drm_i915_private_t *dev_priv = dev->dev_private; 721280369Skib 722280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 723280369Skib if (--ring->irq_refcount == 0) { 724280369Skib dev_priv->irq_mask |= ring->irq_enable_mask; 725280369Skib I915_WRITE(IMR, dev_priv->irq_mask); 726280369Skib POSTING_READ(IMR); 727280369Skib } 728235783Skib} 729235783Skib 730235783Skibstatic bool 731280369Skibi8xx_ring_get_irq(struct intel_ring_buffer *ring) 732235783Skib{ 733235783Skib struct drm_device *dev = ring->dev; 734235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 735235783Skib 736235783Skib if (!dev->irq_enabled) 737235783Skib return false; 738235783Skib 739280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 740235783Skib if (ring->irq_refcount++ == 0) { 741280369Skib dev_priv->irq_mask &= ~ring->irq_enable_mask; 742280369Skib I915_WRITE16(IMR, dev_priv->irq_mask); 743280369Skib POSTING_READ16(IMR); 744235783Skib } 745235783Skib 746235783Skib return true; 747235783Skib} 748235783Skib 749235783Skibstatic void 750280369Skibi8xx_ring_put_irq(struct intel_ring_buffer *ring) 751235783Skib{ 752235783Skib struct drm_device *dev = ring->dev; 753235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 754235783Skib 755280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 756235783Skib if (--ring->irq_refcount == 0) { 757280369Skib dev_priv->irq_mask |= ring->irq_enable_mask; 758280369Skib I915_WRITE16(IMR, dev_priv->irq_mask); 759280369Skib POSTING_READ16(IMR); 760235783Skib } 761235783Skib} 762235783Skib 763235783Skibvoid intel_ring_setup_status_page(struct intel_ring_buffer *ring) 764235783Skib{ 765235783Skib struct drm_device *dev = ring->dev; 766235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 767235783Skib uint32_t mmio = 0; 768235783Skib 769235783Skib /* The ring status page addresses are no longer next to the rest of 770235783Skib * the ring registers as of gen7. 771235783Skib */ 772235783Skib if (IS_GEN7(dev)) { 773235783Skib switch (ring->id) { 774235783Skib case RCS: 775235783Skib mmio = RENDER_HWS_PGA_GEN7; 776235783Skib break; 777235783Skib case BCS: 778235783Skib mmio = BLT_HWS_PGA_GEN7; 779235783Skib break; 780235783Skib case VCS: 781235783Skib mmio = BSD_HWS_PGA_GEN7; 782235783Skib break; 783235783Skib } 784235783Skib } else if (IS_GEN6(dev)) { 785235783Skib mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 786235783Skib } else { 787235783Skib mmio = RING_HWS_PGA(ring->mmio_base); 788235783Skib } 789235783Skib 790235783Skib I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 791235783Skib POSTING_READ(mmio); 792235783Skib} 793235783Skib 794235783Skibstatic int 795235783Skibbsd_ring_flush(struct intel_ring_buffer *ring, 796235783Skib uint32_t invalidate_domains, 797235783Skib uint32_t flush_domains) 798235783Skib{ 799235783Skib int ret; 800235783Skib 801235783Skib ret = intel_ring_begin(ring, 2); 802235783Skib if (ret) 803235783Skib return ret; 804235783Skib 805235783Skib intel_ring_emit(ring, MI_FLUSH); 806235783Skib intel_ring_emit(ring, MI_NOOP); 807235783Skib intel_ring_advance(ring); 808235783Skib return 0; 809235783Skib} 810235783Skib 811235783Skibstatic int 812280369Skibi9xx_add_request(struct intel_ring_buffer *ring, 813280369Skib u32 *result) 814235783Skib{ 815280369Skib u32 seqno; 816235783Skib int ret; 817235783Skib 818235783Skib ret = intel_ring_begin(ring, 4); 819235783Skib if (ret) 820235783Skib return ret; 821235783Skib 822235783Skib seqno = i915_gem_next_request_seqno(ring); 823235783Skib 824235783Skib intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 825235783Skib intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 826235783Skib intel_ring_emit(ring, seqno); 827235783Skib intel_ring_emit(ring, MI_USER_INTERRUPT); 828235783Skib intel_ring_advance(ring); 829235783Skib 830235783Skib *result = seqno; 831235783Skib return 0; 832235783Skib} 833235783Skib 834235783Skibstatic bool 835280369Skibgen6_ring_get_irq(struct intel_ring_buffer *ring) 836235783Skib{ 837235783Skib struct drm_device *dev = ring->dev; 838235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 839235783Skib 840235783Skib if (!dev->irq_enabled) 841235783Skib return false; 842235783Skib 843235783Skib gen6_gt_force_wake_get(dev_priv); 844235783Skib 845280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 846235783Skib if (ring->irq_refcount++ == 0) { 847280369Skib I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 848280369Skib dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 849280369Skib I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 850280369Skib POSTING_READ(GTIMR); 851235783Skib } 852235783Skib 853235783Skib return true; 854235783Skib} 855235783Skib 856235783Skibstatic void 857280369Skibgen6_ring_put_irq(struct intel_ring_buffer *ring) 858235783Skib{ 859235783Skib struct drm_device *dev = ring->dev; 860235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 861235783Skib 862280369Skib mtx_assert(&dev_priv->irq_lock, MA_OWNED); 863235783Skib if (--ring->irq_refcount == 0) { 864280369Skib I915_WRITE_IMR(ring, ~0); 865280369Skib dev_priv->gt_irq_mask |= ring->irq_enable_mask; 866280369Skib I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 867280369Skib POSTING_READ(GTIMR); 868235783Skib } 869235783Skib 870235783Skib gen6_gt_force_wake_put(dev_priv); 871235783Skib} 872235783Skib 873280369Skibstatic int 874280369Skibi965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) 875235783Skib{ 876280369Skib int ret; 877235783Skib 878280369Skib ret = intel_ring_begin(ring, 2); 879280369Skib if (ret) 880280369Skib return ret; 881235783Skib 882280369Skib intel_ring_emit(ring, 883280369Skib MI_BATCH_BUFFER_START | 884280369Skib MI_BATCH_GTT | 885280369Skib MI_BATCH_NON_SECURE_I965); 886280369Skib intel_ring_emit(ring, offset); 887280369Skib intel_ring_advance(ring); 888235783Skib 889280369Skib return 0; 890235783Skib} 891235783Skib 892235783Skibstatic int 893280369Skibi830_dispatch_execbuffer(struct intel_ring_buffer *ring, 894280369Skib u32 offset, u32 len) 895235783Skib{ 896235783Skib int ret; 897235783Skib 898280369Skib ret = intel_ring_begin(ring, 4); 899235783Skib if (ret) 900235783Skib return ret; 901235783Skib 902280369Skib intel_ring_emit(ring, MI_BATCH_BUFFER); 903280369Skib intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); 904280369Skib intel_ring_emit(ring, offset + len - 8); 905280369Skib intel_ring_emit(ring, 0); 906235783Skib intel_ring_advance(ring); 907235783Skib 908235783Skib return 0; 909235783Skib} 910235783Skib 911235783Skibstatic int 912280369Skibi915_dispatch_execbuffer(struct intel_ring_buffer *ring, 913280369Skib u32 offset, u32 len) 914235783Skib{ 915235783Skib int ret; 916235783Skib 917280369Skib ret = intel_ring_begin(ring, 2); 918280369Skib if (ret) 919280369Skib return ret; 920235783Skib 921280369Skib intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 922280369Skib intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); 923235783Skib intel_ring_advance(ring); 924235783Skib 925235783Skib return 0; 926235783Skib} 927235783Skib 928235783Skibstatic void cleanup_status_page(struct intel_ring_buffer *ring) 929235783Skib{ 930235783Skib struct drm_i915_gem_object *obj; 931235783Skib 932235783Skib obj = ring->status_page.obj; 933235783Skib if (obj == NULL) 934235783Skib return; 935235783Skib 936235783Skib pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1); 937254025Sjeff kva_free((vm_offset_t)ring->status_page.page_addr, 938235783Skib PAGE_SIZE); 939235783Skib i915_gem_object_unpin(obj); 940235783Skib drm_gem_object_unreference(&obj->base); 941235783Skib ring->status_page.obj = NULL; 942235783Skib} 943235783Skib 944235783Skibstatic int init_status_page(struct intel_ring_buffer *ring) 945235783Skib{ 946235783Skib struct drm_device *dev = ring->dev; 947235783Skib struct drm_i915_gem_object *obj; 948235783Skib int ret; 949235783Skib 950235783Skib obj = i915_gem_alloc_object(dev, 4096); 951235783Skib if (obj == NULL) { 952235783Skib DRM_ERROR("Failed to allocate status page\n"); 953235783Skib ret = -ENOMEM; 954235783Skib goto err; 955235783Skib } 956235783Skib 957235783Skib i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 958235783Skib 959235783Skib ret = i915_gem_object_pin(obj, 4096, true); 960235783Skib if (ret != 0) { 961235783Skib goto err_unref; 962235783Skib } 963235783Skib 964235783Skib ring->status_page.gfx_addr = obj->gtt_offset; 965254025Sjeff ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE); 966235783Skib if (ring->status_page.page_addr == NULL) { 967235783Skib goto err_unpin; 968235783Skib } 969235783Skib pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0], 970235783Skib 1); 971235783Skib pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr, 972273136Skib (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE); 973235783Skib ring->status_page.obj = obj; 974235783Skib memset(ring->status_page.page_addr, 0, PAGE_SIZE); 975235783Skib 976235783Skib intel_ring_setup_status_page(ring); 977235783Skib DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n", 978235783Skib ring->name, ring->status_page.gfx_addr); 979235783Skib 980235783Skib return 0; 981235783Skib 982235783Skiberr_unpin: 983235783Skib i915_gem_object_unpin(obj); 984235783Skiberr_unref: 985235783Skib drm_gem_object_unreference(&obj->base); 986235783Skiberr: 987235783Skib return ret; 988235783Skib} 989235783Skib 990280369Skibstatic int intel_init_ring_buffer(struct drm_device *dev, 991235783Skib struct intel_ring_buffer *ring) 992235783Skib{ 993235783Skib struct drm_i915_gem_object *obj; 994235783Skib int ret; 995235783Skib 996235783Skib ring->dev = dev; 997235783Skib INIT_LIST_HEAD(&ring->active_list); 998235783Skib INIT_LIST_HEAD(&ring->request_list); 999235783Skib INIT_LIST_HEAD(&ring->gpu_write_list); 1000280369Skib ring->size = 32 * PAGE_SIZE; 1001235783Skib 1002235783Skib if (I915_NEED_GFX_HWS(dev)) { 1003235783Skib ret = init_status_page(ring); 1004235783Skib if (ret) 1005235783Skib return ret; 1006235783Skib } 1007235783Skib 1008235783Skib obj = i915_gem_alloc_object(dev, ring->size); 1009235783Skib if (obj == NULL) { 1010235783Skib DRM_ERROR("Failed to allocate ringbuffer\n"); 1011235783Skib ret = -ENOMEM; 1012235783Skib goto err_hws; 1013235783Skib } 1014235783Skib 1015235783Skib ring->obj = obj; 1016235783Skib 1017235783Skib ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 1018235783Skib if (ret) 1019235783Skib goto err_unref; 1020235783Skib 1021280369Skib ring->virtual_start = pmap_mapdev_attr( 1022280369Skib dev->agp->base + obj->gtt_offset, ring->size, 1023280369Skib VM_MEMATTR_WRITE_COMBINING); 1024280369Skib if (ring->virtual_start == NULL) { 1025235783Skib DRM_ERROR("Failed to map ringbuffer.\n"); 1026235783Skib ret = -EINVAL; 1027235783Skib goto err_unpin; 1028235783Skib } 1029235783Skib 1030235783Skib ret = ring->init(ring); 1031235783Skib if (ret) 1032235783Skib goto err_unmap; 1033235783Skib 1034235783Skib /* Workaround an erratum on the i830 which causes a hang if 1035235783Skib * the TAIL pointer points to within the last 2 cachelines 1036235783Skib * of the buffer. 1037235783Skib */ 1038235783Skib ring->effective_size = ring->size; 1039235783Skib if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1040235783Skib ring->effective_size -= 128; 1041235783Skib 1042235783Skib return 0; 1043235783Skib 1044235783Skiberr_unmap: 1045280369Skib pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size); 1046235783Skiberr_unpin: 1047235783Skib i915_gem_object_unpin(obj); 1048235783Skiberr_unref: 1049235783Skib drm_gem_object_unreference(&obj->base); 1050235783Skib ring->obj = NULL; 1051235783Skiberr_hws: 1052235783Skib cleanup_status_page(ring); 1053235783Skib return ret; 1054235783Skib} 1055235783Skib 1056235783Skibvoid intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) 1057235783Skib{ 1058235783Skib struct drm_i915_private *dev_priv; 1059235783Skib int ret; 1060235783Skib 1061235783Skib if (ring->obj == NULL) 1062235783Skib return; 1063235783Skib 1064235783Skib /* Disable the ring buffer. The ring must be idle at this point */ 1065235783Skib dev_priv = ring->dev->dev_private; 1066235783Skib ret = intel_wait_ring_idle(ring); 1067235783Skib I915_WRITE_CTL(ring, 0); 1068235783Skib 1069280369Skib pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size); 1070235783Skib 1071235783Skib i915_gem_object_unpin(ring->obj); 1072235783Skib drm_gem_object_unreference(&ring->obj->base); 1073235783Skib ring->obj = NULL; 1074235783Skib 1075235783Skib if (ring->cleanup) 1076235783Skib ring->cleanup(ring); 1077235783Skib 1078235783Skib cleanup_status_page(ring); 1079235783Skib} 1080235783Skib 1081235783Skibstatic int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) 1082235783Skib{ 1083280369Skib uint32_t *virt; 1084235783Skib int rem = ring->size - ring->tail; 1085235783Skib 1086235783Skib if (ring->space < rem) { 1087235783Skib int ret = intel_wait_ring_buffer(ring, rem); 1088235783Skib if (ret) 1089235783Skib return ret; 1090235783Skib } 1091235783Skib 1092280369Skib virt = (uint32_t *)((char *)ring->virtual_start + ring->tail); 1093280369Skib rem /= 4; 1094280369Skib while (rem--) 1095235783Skib *virt++ = MI_NOOP; 1096235783Skib 1097235783Skib ring->tail = 0; 1098235783Skib ring->space = ring_space(ring); 1099235783Skib 1100235783Skib return 0; 1101235783Skib} 1102235783Skib 1103235783Skibstatic int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 1104235783Skib{ 1105235783Skib struct drm_i915_private *dev_priv = ring->dev->dev_private; 1106235783Skib bool was_interruptible; 1107235783Skib int ret; 1108235783Skib 1109235783Skib /* XXX As we have not yet audited all the paths to check that 1110235783Skib * they are ready for ERESTARTSYS from intel_ring_begin, do not 1111235783Skib * allow us to be interruptible by a signal. 1112235783Skib */ 1113235783Skib was_interruptible = dev_priv->mm.interruptible; 1114235783Skib dev_priv->mm.interruptible = false; 1115235783Skib 1116280369Skib ret = i915_wait_request(ring, seqno); 1117235783Skib 1118235783Skib dev_priv->mm.interruptible = was_interruptible; 1119280369Skib if (!ret) 1120280369Skib i915_gem_retire_requests_ring(ring); 1121235783Skib 1122235783Skib return ret; 1123235783Skib} 1124235783Skib 1125235783Skibstatic int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) 1126235783Skib{ 1127235783Skib struct drm_i915_gem_request *request; 1128235783Skib u32 seqno = 0; 1129235783Skib int ret; 1130235783Skib 1131235783Skib i915_gem_retire_requests_ring(ring); 1132235783Skib 1133235783Skib if (ring->last_retired_head != -1) { 1134235783Skib ring->head = ring->last_retired_head; 1135235783Skib ring->last_retired_head = -1; 1136235783Skib ring->space = ring_space(ring); 1137235783Skib if (ring->space >= n) 1138235783Skib return 0; 1139235783Skib } 1140235783Skib 1141235783Skib list_for_each_entry(request, &ring->request_list, list) { 1142235783Skib int space; 1143235783Skib 1144235783Skib if (request->tail == -1) 1145235783Skib continue; 1146235783Skib 1147235783Skib space = request->tail - (ring->tail + 8); 1148235783Skib if (space < 0) 1149235783Skib space += ring->size; 1150235783Skib if (space >= n) { 1151235783Skib seqno = request->seqno; 1152235783Skib break; 1153235783Skib } 1154235783Skib 1155235783Skib /* Consume this request in case we need more space than 1156235783Skib * is available and so need to prevent a race between 1157235783Skib * updating last_retired_head and direct reads of 1158235783Skib * I915_RING_HEAD. It also provides a nice sanity check. 1159235783Skib */ 1160235783Skib request->tail = -1; 1161235783Skib } 1162235783Skib 1163235783Skib if (seqno == 0) 1164235783Skib return -ENOSPC; 1165235783Skib 1166235783Skib ret = intel_ring_wait_seqno(ring, seqno); 1167235783Skib if (ret) 1168235783Skib return ret; 1169235783Skib 1170235783Skib if (ring->last_retired_head == -1) 1171235783Skib return -ENOSPC; 1172235783Skib 1173235783Skib ring->head = ring->last_retired_head; 1174235783Skib ring->last_retired_head = -1; 1175235783Skib ring->space = ring_space(ring); 1176235783Skib if (ring->space < n) 1177235783Skib return -ENOSPC; 1178235783Skib 1179235783Skib return 0; 1180235783Skib} 1181235783Skib 1182235783Skibint intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) 1183235783Skib{ 1184235783Skib struct drm_device *dev = ring->dev; 1185235783Skib struct drm_i915_private *dev_priv = dev->dev_private; 1186235783Skib int end; 1187235783Skib int ret; 1188235783Skib 1189235783Skib ret = intel_ring_wait_request(ring, n); 1190235783Skib if (ret != -ENOSPC) 1191235783Skib return ret; 1192235783Skib 1193235783Skib CTR1(KTR_DRM, "ring_wait_begin %s", ring->name); 1194280369Skib /* With GEM the hangcheck timer should kick us out of the loop, 1195280369Skib * leaving it early runs the risk of corrupting GEM state (due 1196280369Skib * to running on almost untested codepaths). But on resume 1197280369Skib * timers don't work yet, so prevent a complete hang in that 1198280369Skib * case by choosing an insanely large timeout. */ 1199280369Skib end = ticks + hz * 60; 1200280369Skib 1201235783Skib do { 1202235783Skib ring->head = I915_READ_HEAD(ring); 1203235783Skib ring->space = ring_space(ring); 1204235783Skib if (ring->space >= n) { 1205235783Skib CTR1(KTR_DRM, "ring_wait_end %s", ring->name); 1206235783Skib return 0; 1207235783Skib } 1208235783Skib 1209235783Skib if (dev->primary->master) { 1210235783Skib struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1211235783Skib if (master_priv->sarea_priv) 1212235783Skib master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1213235783Skib } 1214235783Skib 1215235783Skib pause("915rng", 1); 1216235783Skib if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) { 1217235783Skib CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name); 1218235783Skib return -EAGAIN; 1219235783Skib } 1220235783Skib } while (!time_after(ticks, end)); 1221235783Skib CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name); 1222235783Skib return -EBUSY; 1223235783Skib} 1224235783Skib 1225235783Skibint intel_ring_begin(struct intel_ring_buffer *ring, 1226235783Skib int num_dwords) 1227235783Skib{ 1228235783Skib struct drm_i915_private *dev_priv = ring->dev->dev_private; 1229235783Skib int n = 4*num_dwords; 1230235783Skib int ret; 1231235783Skib 1232235783Skib if (atomic_load_acq_int(&dev_priv->mm.wedged)) 1233235783Skib return -EIO; 1234235783Skib 1235235783Skib if (ring->tail + n > ring->effective_size) { 1236235783Skib ret = intel_wrap_ring_buffer(ring); 1237235783Skib if (ret != 0) 1238235783Skib return ret; 1239235783Skib } 1240235783Skib 1241235783Skib if (ring->space < n) { 1242235783Skib ret = intel_wait_ring_buffer(ring, n); 1243235783Skib if (ret != 0) 1244235783Skib return ret; 1245235783Skib } 1246235783Skib 1247235783Skib ring->space -= n; 1248235783Skib return 0; 1249235783Skib} 1250235783Skib 1251235783Skibvoid intel_ring_advance(struct intel_ring_buffer *ring) 1252235783Skib{ 1253280369Skib struct drm_i915_private *dev_priv = ring->dev->dev_private; 1254280369Skib 1255235783Skib ring->tail &= ring->size - 1; 1256280369Skib if (dev_priv->stop_rings & intel_ring_flag(ring)) 1257280369Skib return; 1258235783Skib ring->write_tail(ring, ring->tail); 1259235783Skib} 1260235783Skib 1261235783Skib 1262235783Skibstatic void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1263280369Skib u32 value) 1264235783Skib{ 1265235783Skib drm_i915_private_t *dev_priv = ring->dev->dev_private; 1266235783Skib 1267235783Skib /* Every tail move must follow the sequence below */ 1268235783Skib I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1269235783Skib GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | 1270235783Skib GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); 1271235783Skib I915_WRITE(GEN6_BSD_RNCID, 0x0); 1272235783Skib 1273235783Skib if (_intel_wait_for(ring->dev, 1274235783Skib (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 1275235783Skib GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50, 1276235783Skib true, "915g6i") != 0) 1277235783Skib DRM_ERROR("timed out waiting for IDLE Indicator\n"); 1278235783Skib 1279235783Skib I915_WRITE_TAIL(ring, value); 1280235783Skib I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1281235783Skib GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | 1282235783Skib GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 1283235783Skib} 1284235783Skib 1285235783Skibstatic int gen6_ring_flush(struct intel_ring_buffer *ring, 1286235783Skib uint32_t invalidate, uint32_t flush) 1287235783Skib{ 1288235783Skib uint32_t cmd; 1289235783Skib int ret; 1290235783Skib 1291235783Skib ret = intel_ring_begin(ring, 4); 1292235783Skib if (ret) 1293235783Skib return ret; 1294235783Skib 1295235783Skib cmd = MI_FLUSH_DW; 1296235783Skib if (invalidate & I915_GEM_GPU_DOMAINS) 1297235783Skib cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 1298235783Skib intel_ring_emit(ring, cmd); 1299235783Skib intel_ring_emit(ring, 0); 1300235783Skib intel_ring_emit(ring, 0); 1301235783Skib intel_ring_emit(ring, MI_NOOP); 1302235783Skib intel_ring_advance(ring); 1303235783Skib return 0; 1304235783Skib} 1305235783Skib 1306235783Skibstatic int 1307235783Skibgen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1308235783Skib uint32_t offset, uint32_t len) 1309235783Skib{ 1310235783Skib int ret; 1311235783Skib 1312235783Skib ret = intel_ring_begin(ring, 2); 1313235783Skib if (ret) 1314235783Skib return ret; 1315235783Skib 1316235783Skib intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); 1317235783Skib /* bit0-7 is the length on GEN6+ */ 1318235783Skib intel_ring_emit(ring, offset); 1319235783Skib intel_ring_advance(ring); 1320235783Skib 1321235783Skib return 0; 1322235783Skib} 1323235783Skib 1324235783Skib/* Blitter support (SandyBridge+) */ 1325235783Skib 1326235783Skibstatic int blt_ring_flush(struct intel_ring_buffer *ring, 1327280369Skib u32 invalidate, u32 flush) 1328235783Skib{ 1329280369Skib u32 cmd; 1330235783Skib int ret; 1331235783Skib 1332235783Skib ret = intel_ring_begin(ring, 4); 1333235783Skib if (ret) 1334235783Skib return ret; 1335235783Skib 1336235783Skib cmd = MI_FLUSH_DW; 1337235783Skib if (invalidate & I915_GEM_DOMAIN_RENDER) 1338235783Skib cmd |= MI_INVALIDATE_TLB; 1339235783Skib intel_ring_emit(ring, cmd); 1340235783Skib intel_ring_emit(ring, 0); 1341235783Skib intel_ring_emit(ring, 0); 1342235783Skib intel_ring_emit(ring, MI_NOOP); 1343235783Skib intel_ring_advance(ring); 1344235783Skib return 0; 1345235783Skib} 1346235783Skib 1347235783Skibint intel_init_render_ring_buffer(struct drm_device *dev) 1348235783Skib{ 1349235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1350235783Skib struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; 1351235783Skib 1352280369Skib ring->name = "render ring"; 1353280369Skib ring->id = RCS; 1354280369Skib ring->mmio_base = RENDER_RING_BASE; 1355280369Skib 1356235783Skib if (INTEL_INFO(dev)->gen >= 6) { 1357235783Skib ring->add_request = gen6_add_request; 1358235783Skib ring->flush = gen6_render_ring_flush; 1359280369Skib ring->irq_get = gen6_ring_get_irq; 1360280369Skib ring->irq_put = gen6_ring_put_irq; 1361280369Skib ring->irq_enable_mask = GT_USER_INTERRUPT; 1362235783Skib ring->get_seqno = gen6_ring_get_seqno; 1363280369Skib ring->sync_to = gen6_ring_sync; 1364280369Skib ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; 1365280369Skib ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; 1366280369Skib ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; 1367280369Skib ring->signal_mbox[0] = GEN6_VRSYNC; 1368280369Skib ring->signal_mbox[1] = GEN6_BRSYNC; 1369235783Skib } else if (IS_GEN5(dev)) { 1370235783Skib ring->add_request = pc_render_add_request; 1371280369Skib ring->flush = gen4_render_ring_flush; 1372235783Skib ring->get_seqno = pc_render_get_seqno; 1373280369Skib ring->irq_get = gen5_ring_get_irq; 1374280369Skib ring->irq_put = gen5_ring_put_irq; 1375280369Skib ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; 1376280369Skib } else { 1377280369Skib ring->add_request = i9xx_add_request; 1378280369Skib if (INTEL_INFO(dev)->gen < 4) 1379280369Skib ring->flush = gen2_render_ring_flush; 1380280369Skib else 1381280369Skib ring->flush = gen4_render_ring_flush; 1382280369Skib ring->get_seqno = ring_get_seqno; 1383280369Skib if (IS_GEN2(dev)) { 1384280369Skib ring->irq_get = i8xx_ring_get_irq; 1385280369Skib ring->irq_put = i8xx_ring_put_irq; 1386280369Skib } else { 1387280369Skib ring->irq_get = i9xx_ring_get_irq; 1388280369Skib ring->irq_put = i9xx_ring_put_irq; 1389280369Skib } 1390280369Skib ring->irq_enable_mask = I915_USER_INTERRUPT; 1391235783Skib } 1392280369Skib ring->write_tail = ring_write_tail; 1393280369Skib if (INTEL_INFO(dev)->gen >= 6) 1394280369Skib ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1395280369Skib else if (INTEL_INFO(dev)->gen >= 4) 1396280369Skib ring->dispatch_execbuffer = i965_dispatch_execbuffer; 1397280369Skib else if (IS_I830(dev) || IS_845G(dev)) 1398280369Skib ring->dispatch_execbuffer = i830_dispatch_execbuffer; 1399280369Skib else 1400280369Skib ring->dispatch_execbuffer = i915_dispatch_execbuffer; 1401280369Skib ring->init = init_render_ring; 1402280369Skib ring->cleanup = render_ring_cleanup; 1403235783Skib 1404280369Skib 1405235783Skib if (!I915_NEED_GFX_HWS(dev)) { 1406235783Skib ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1407235783Skib memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1408235783Skib } 1409235783Skib 1410235783Skib return intel_init_ring_buffer(dev, ring); 1411235783Skib} 1412235783Skib 1413280369Skibint intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) 1414235783Skib{ 1415235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1416235783Skib struct intel_ring_buffer *ring = &dev_priv->rings[RCS]; 1417235783Skib 1418280369Skib ring->name = "render ring"; 1419280369Skib ring->id = RCS; 1420280369Skib ring->mmio_base = RENDER_RING_BASE; 1421280369Skib 1422235783Skib if (INTEL_INFO(dev)->gen >= 6) { 1423280369Skib /* non-kms not supported on gen6+ */ 1424280369Skib return -ENODEV; 1425235783Skib } 1426235783Skib 1427280369Skib /* Note: gem is not supported on gen5/ilk without kms (the corresponding 1428280369Skib * gem_init ioctl returns with -ENODEV). Hence we do not need to set up 1429280369Skib * the special gen5 functions. */ 1430280369Skib ring->add_request = i9xx_add_request; 1431280369Skib if (INTEL_INFO(dev)->gen < 4) 1432280369Skib ring->flush = gen2_render_ring_flush; 1433280369Skib else 1434280369Skib ring->flush = gen4_render_ring_flush; 1435280369Skib ring->get_seqno = ring_get_seqno; 1436280369Skib if (IS_GEN2(dev)) { 1437280369Skib ring->irq_get = i8xx_ring_get_irq; 1438280369Skib ring->irq_put = i8xx_ring_put_irq; 1439280369Skib } else { 1440280369Skib ring->irq_get = i9xx_ring_get_irq; 1441280369Skib ring->irq_put = i9xx_ring_put_irq; 1442280369Skib } 1443280369Skib ring->irq_enable_mask = I915_USER_INTERRUPT; 1444280369Skib ring->write_tail = ring_write_tail; 1445280369Skib if (INTEL_INFO(dev)->gen >= 4) 1446280369Skib ring->dispatch_execbuffer = i965_dispatch_execbuffer; 1447280369Skib else if (IS_I830(dev) || IS_845G(dev)) 1448280369Skib ring->dispatch_execbuffer = i830_dispatch_execbuffer; 1449280369Skib else 1450280369Skib ring->dispatch_execbuffer = i915_dispatch_execbuffer; 1451280369Skib ring->init = init_render_ring; 1452280369Skib ring->cleanup = render_ring_cleanup; 1453280369Skib 1454235783Skib ring->dev = dev; 1455235783Skib INIT_LIST_HEAD(&ring->active_list); 1456235783Skib INIT_LIST_HEAD(&ring->request_list); 1457235783Skib INIT_LIST_HEAD(&ring->gpu_write_list); 1458235783Skib 1459235783Skib ring->size = size; 1460235783Skib ring->effective_size = ring->size; 1461235783Skib if (IS_I830(ring->dev)) 1462235783Skib ring->effective_size -= 128; 1463235783Skib 1464280369Skib ring->virtual_start = pmap_mapdev_attr(start, size, 1465280369Skib VM_MEMATTR_WRITE_COMBINING); 1466280369Skib if (ring->virtual_start == NULL) { 1467235783Skib DRM_ERROR("can not ioremap virtual address for" 1468235783Skib " ring buffer\n"); 1469235783Skib return -ENOMEM; 1470235783Skib } 1471235783Skib 1472235783Skib return 0; 1473235783Skib} 1474235783Skib 1475235783Skibint intel_init_bsd_ring_buffer(struct drm_device *dev) 1476235783Skib{ 1477235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1478235783Skib struct intel_ring_buffer *ring = &dev_priv->rings[VCS]; 1479235783Skib 1480280369Skib ring->name = "bsd ring"; 1481280369Skib ring->id = VCS; 1482235783Skib 1483280369Skib ring->write_tail = ring_write_tail; 1484280369Skib if (IS_GEN6(dev) || IS_GEN7(dev)) { 1485280369Skib ring->mmio_base = GEN6_BSD_RING_BASE; 1486280369Skib /* gen6 bsd needs a special wa for tail updates */ 1487280369Skib if (IS_GEN6(dev)) 1488280369Skib ring->write_tail = gen6_bsd_ring_write_tail; 1489280369Skib ring->flush = gen6_ring_flush; 1490280369Skib ring->add_request = gen6_add_request; 1491280369Skib ring->get_seqno = gen6_ring_get_seqno; 1492280369Skib ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; 1493280369Skib ring->irq_get = gen6_ring_get_irq; 1494280369Skib ring->irq_put = gen6_ring_put_irq; 1495280369Skib ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1496280369Skib ring->sync_to = gen6_ring_sync; 1497280369Skib ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; 1498280369Skib ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; 1499280369Skib ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; 1500280369Skib ring->signal_mbox[0] = GEN6_RVSYNC; 1501280369Skib ring->signal_mbox[1] = GEN6_BVSYNC; 1502280369Skib } else { 1503280369Skib ring->mmio_base = BSD_RING_BASE; 1504280369Skib ring->flush = bsd_ring_flush; 1505280369Skib ring->add_request = i9xx_add_request; 1506280369Skib ring->get_seqno = ring_get_seqno; 1507280369Skib if (IS_GEN5(dev)) { 1508280369Skib ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1509280369Skib ring->irq_get = gen5_ring_get_irq; 1510280369Skib ring->irq_put = gen5_ring_put_irq; 1511280369Skib } else { 1512280369Skib ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; 1513280369Skib ring->irq_get = i9xx_ring_get_irq; 1514280369Skib ring->irq_put = i9xx_ring_put_irq; 1515280369Skib } 1516280369Skib ring->dispatch_execbuffer = i965_dispatch_execbuffer; 1517280369Skib } 1518280369Skib ring->init = init_ring_common; 1519280369Skib 1520280369Skib 1521235783Skib return intel_init_ring_buffer(dev, ring); 1522235783Skib} 1523235783Skib 1524235783Skibint intel_init_blt_ring_buffer(struct drm_device *dev) 1525235783Skib{ 1526235783Skib drm_i915_private_t *dev_priv = dev->dev_private; 1527235783Skib struct intel_ring_buffer *ring = &dev_priv->rings[BCS]; 1528235783Skib 1529280369Skib ring->name = "blitter ring"; 1530280369Skib ring->id = BCS; 1531235783Skib 1532280369Skib ring->mmio_base = BLT_RING_BASE; 1533280369Skib ring->write_tail = ring_write_tail; 1534280369Skib ring->flush = blt_ring_flush; 1535280369Skib ring->add_request = gen6_add_request; 1536280369Skib ring->get_seqno = gen6_ring_get_seqno; 1537280369Skib ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; 1538280369Skib ring->irq_get = gen6_ring_get_irq; 1539280369Skib ring->irq_put = gen6_ring_put_irq; 1540280369Skib ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1541280369Skib ring->sync_to = gen6_ring_sync; 1542280369Skib ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; 1543280369Skib ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; 1544280369Skib ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; 1545280369Skib ring->signal_mbox[0] = GEN6_RBSYNC; 1546280369Skib ring->signal_mbox[1] = GEN6_VBSYNC; 1547280369Skib ring->init = init_ring_common; 1548280369Skib 1549235783Skib return intel_init_ring_buffer(dev, ring); 1550235783Skib} 1551