/*
 * $FreeBSD$
 */

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Hardware status page: a page of memory the GPU writes status values
 * (e.g. sequence numbers) into, read by the CPU via page_addr.
 * gfx_addr is the page's address in the GPU's address space.
 */
struct intel_hw_status_page {
	uint32_t	*page_addr;	/* CPU-visible mapping of the page */
	unsigned int	gfx_addr;	/* GPU (GTT) address of the page */
	struct		drm_i915_gem_object *obj; /* backing GEM object */
};

/*
 * Per-ring MMIO register accessors.  Each ring's registers live at a
 * fixed offset from its mmio_base; RING_* and I915_READ/I915_WRITE are
 * defined elsewhere in the driver.
 */
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

/*
 * State for one GPU command ring (render, video, or blitter).  Combines
 * the ring's buffer bookkeeping (head/tail/space), its hardware status
 * page, IRQ/seqno tracking, and a vtable of per-generation operations
 * filled in by the ring-init functions below.
 */
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,	/* render command streamer */
		VCS,		/* video command streamer */
		BCS,		/* blitter command streamer */
	} id;
#define I915_NUM_RINGS 3
	uint32_t	mmio_base;	/* base of this ring's register block */
	void		*virtual_start;	/* CPU mapping of the ring buffer */
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj; /* ring buffer backing object */

	uint32_t	head;		/* byte offset of next GPU read */
	uint32_t	tail;		/* byte offset of next CPU write */
	int		space;		/* free bytes between tail and head */
	int		size;		/* total ring size in bytes */
	int		effective_size;	/* usable size (size minus wrap slack) */
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

	struct mtx	irq_lock;	/* protects the irq state below */
	uint32_t	irq_refcount;	/* nested irq_get/irq_put count */
	uint32_t	irq_mask;
	uint32_t	irq_seqno;	/* last seqno seen at irq time */
	uint32_t	trace_irq_seqno;
	uint32_t	waiting_seqno;
	/* Last seqno signalled to each *other* ring, hence NUM_RINGS-1;
	 * indexed via intel_ring_sync_index(). */
	uint32_t	sync_seqno[I915_NUM_RINGS-1];
	bool		(*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
			    uint32_t value);
	int		(*flush)(struct intel_ring_buffer *ring,
			    uint32_t invalidate_domains,
			    uint32_t flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
			    uint32_t *seqno);
	uint32_t	(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
			    uint32_t offset, uint32_t length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
			    struct intel_ring_buffer *to,
			    u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2]; /* mboxes this ring signals to */

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list, last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	uint32_t outstanding_lazy_request;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;

	drm_local_map_t map;

	void *private;	/* per-ring-type private data, owned by the ring code */
};

/* True once the ring's backing buffer object has been allocated. */
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

/* Single-bit mask identifying this ring (bit position == ring id). */
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

/*
 * Map "other" ring to its slot in this ring's sync_seqno[] array.
 * NOTE(review): relies on rings being laid out contiguously in memory in
 * id order, so the pointer difference equals the id difference — confirm
 * against the allocation site before changing ring storage.
 */
static inline uint32_t
intel_ring_sync_index(struct intel_ring_buffer *ring,
    struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

/*
 * Read one 32-bit slot of the hardware status page with acquire
 * semantics, so reads that depend on the value are not reordered
 * before it.  'reg' is an index in 32-bit words, not bytes.
 */
static inline uint32_t
intel_read_status_page(struct intel_ring_buffer *ring, int reg)
{

	return (atomic_load_acq_32(ring->status_page.page_addr + reg));
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);

/* Wait until the ring is (almost) empty: all but 8 bytes free.
 * NOTE(review): presumably 8 bytes are reserved so head never equals
 * tail on a full ring — confirm against intel_wait_ring_buffer(). */
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{

	return (intel_wait_ring_buffer(ring, ring->size - 8));
}

int intel_ring_begin(struct intel_ring_buffer *ring, int n);

/* Write one dword at the current tail and advance the tail by 4.
 * Callers must have reserved space with intel_ring_begin() first. */
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
    uint32_t data)
{
	*(volatile uint32_t *)((char *)ring->virtual_start +
	    ring->tail) = data;
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);

uint32_t intel_ring_get_seqno(struct intel_ring_buffer *ring);

/* Per-ring-type constructors; fill in the ops vtable for each engine. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

/* Software-side tail position (not read back from hardware). */
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno);

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
    uint32_t size);

#endif /* _INTEL_RINGBUFFER_H_ */