/*	$NetBSD: intel_ring.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_ring.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_ring.h"
#include "intel_timeline.h"

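/*
 * Recompute the free space between the last known hardware read pointer
 * (ring->head) and the software write pointer (ring->emit), caching the
 * result in ring->space.  Roughly, per __intel_ring_space() in
 * intel_ring.h, this is
 *
 *	(head - emit - CACHELINE_BYTES) & (size - 1)
 *
 * with a cacheline of slack kept because the hardware forbids HEAD from
 * being ahead of TAIL within the same cacheline.
 */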
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

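/*
 * Pin the ring into the GGTT and map it for CPU access.  Only the first
 * pinner does the work; nested callers just bump ring->pin_count.  The
 * mapping goes through the mappable aperture when the vma is map-and-
 * fenceable (e.g. stolen memory), and through a regular kernel object
 * mapping otherwise.
 */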
int intel_ring_pin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	flags = PIN_GLOBAL;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_vma_pin(vma, 0, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj,
					       i915_coherent_map_type(vma->vm->i915));
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

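/*
 * Reset all three ring offsets (head, tail and emit) to @tail, wrapped
 * to the ring size, and recompute the cached free space.
 */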
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

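/*
 * Drop a pin reference.  The last unpinner tears down the CPU mapping,
 * marks the backing pages purgeable (so the shrinker may discard them)
 * and releases the GGTT binding.
 */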
void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

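/*
 * Allocate the backing object for a ring: prefer stolen memory when the
 * GGTT has a mappable aperture through which stolen can be accessed,
 * and fall back to an internal (non-shmem) object otherwise.
 */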
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = ERR_PTR(-ENODEV);
	if (i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

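/*
 * Allocate and initialise a struct intel_ring of @size bytes for
 * @engine.  @size must be a power of two and encodable in the ring
 * control register's buffer-length field; the backing vma is created
 * here but not pinned until intel_ring_pin().
 */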
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
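	/*
	 * ring->wrap (below) is the left shift that moves the top bit of
	 * a ring offset into the sign bit of a u32; intel_ring_direction()
	 * in intel_ring.h uses it so that subtracting two wrapped offsets
	 * yields a correctly signed distance.
	 */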
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

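/*
 * Not enough free space: find the oldest request on this timeline whose
 * retirement (everything up to its postfix) would free at least @bytes,
 * wait for it to complete, then retire up to it.  Called from
 * intel_ring_begin() when the ring is full.
 */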
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

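/*
 * Reserve @num_dwords of space in the ring for @rq, wrapping (padding
 * the tail with MI_NOOPs) or waiting for old requests to retire as
 * required, and return a pointer at which to write the commands.
 * Typical emitter usage is something like the following sketch:
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */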
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
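			/*
			 * remain_actual is qword aligned (both ring->size
			 * and ring->emit are), so bit 0 is free to act as
			 * the "wrap needed" flag; it is stripped again
			 * with need_wrap &= ~1 below before padding.
			 */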
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

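	/* MI_NOOP is 0; pack two per qword to pad up to the cacheline. */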
	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}