/*	$NetBSD: amdgpu_ring.h,v 1.3 2021/12/19 10:59:01 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <linux/idr.h>

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

/* max number of rings */
#define AMDGPU_MAX_RINGS		28
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG
};

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
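
/*
 * Illustrative sketch, not part of this header's API contract: the
 * usual emission pattern reserves ring space, writes packets, emits a
 * fence that signals when the hardware has consumed them, and then
 * commits.  Error handling is elided and `ndw' is a caller-chosen
 * dword estimate.
 *
 *	struct dma_fence *f = NULL;
 *
 *	if (amdgpu_ring_alloc(ring, ndw) == 0) {
 *		... emit packet dwords with amdgpu_ring_write() ...
 *		amdgpu_fence_emit(ring, &f, 0);
 *		amdgpu_ring_commit(ring);
 *	}
 */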

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	unsigned		vmhub;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
	/* priority functions */
	void (*set_priority) (struct amdgpu_ring *ring,
			      enum drm_sched_priority priority);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
};
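
/*
 * These per-IP hooks are normally reached through the amdgpu_ring_*()
 * wrapper macros defined below, after struct amdgpu_ring.
 */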

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t                mqd_gpu_addr;
	void                    *mqd_ptr;
	uint64_t                eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	unsigned		fence_offs;
	uint64_t		current_ctx;
	char			name[16];
	u32                     trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;

	atomic_t		num_jobs[DRM_SCHED_PRIORITY_MAX];
	struct mutex		priority_mutex;
	/* protected by priority_mutex */
	int			priority;

#if defined(CONFIG_DEBUG_FS)
	struct dentry *ent;
#endif
};

#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)

int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
			      enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
		     unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
							bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;
	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
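
/*
 * Illustrative sketch: after reserving space with amdgpu_ring_alloc(),
 * each dword is pushed with amdgpu_ring_write() and accounted against
 * count_dw.  Writing the IP block's NOP opcode is the simplest valid
 * packet:
 *
 *	if (amdgpu_ring_alloc(ring, 1) == 0) {
 *		amdgpu_ring_write(ring, ring->funcs->nop);
 *		amdgpu_ring_commit(ring);
 *	}
 */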

static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	/*
	 * The ring buffer wraps every buf_mask + 1 dwords, so the copy
	 * may have to be split in two: chunk1 up to the end of the
	 * buffer, chunk2 from its start.
	 */
	occupied = ring->wptr & ring->buf_mask;
	dst = __UNVOLATILE(&ring->ring[occupied]);
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;	/* dwords to bytes */
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = __UNVOLATILE(ring->ring);
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
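
/*
 * Illustrative sketch (`pkt' is a hypothetical prebuilt packet): copy
 * a whole dword array in one call instead of looping over
 * amdgpu_ring_write().  Space must have been reserved first:
 *
 *	uint32_t pkt[4];
 *	... fill pkt ...
 *	if (amdgpu_ring_alloc(ring, 4) == 0) {
 *		amdgpu_ring_write_multiple(ring, pkt, 4);
 *		amdgpu_ring_commit(ring);
 *	}
 */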

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

#endif
