// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_mqd_manager.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

/* Mapping queue priority to pipe priority, indexed by queue priority */
int pipe_priority_map[] = {
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH
};

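/* The HIQ MQD lives at the very start of the preallocated hiq_sdma_mqd
 * GTT buffer owned by the device queue manager, so this only allocates
 * the wrapper object and points it at that shared backing store.
 */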
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
	mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;

	return mqd_mem_obj;
}

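/* SDMA MQDs are carved out of the same shared hiq_sdma_mqd buffer: the
 * per-XCC HIQ MQDs come first, followed by one MQD slot per SDMA queue,
 * indexed by the (engine, queue) pair.
 */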
struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
					struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t offset;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	offset = (q->sdma_engine_id *
		dev->kfd->device_info.num_sdma_queues_per_engine +
		q->sdma_queue_id) *
		dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;

	offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
		  NUM_XCC(dev->xcc_mask);

	mqd_mem_obj->gtt_mem = (void *)((uintptr_t)dev->dqm->hiq_sdma_mqd.gtt_mem
				+ offset);
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);

	return mqd_mem_obj;
}

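/* HIQ and SDMA MQDs share DQM-owned backing memory, so this releases
 * only the wrapper object, never the underlying GTT allocation.
 */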
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	WARN_ON(!mqd_mem_obj->gtt_mem);
	kfree(mqd_mem_obj);
}

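/* Build per-SE CU enable masks (se_mask) from the user-supplied cu_mask,
 * distributing enabled CUs evenly across shader engines, shader arrays
 * and XCCs; the mapping rules are spelled out in the body below.
 *
 * Illustrative call, assuming a hypothetical pre-GFX10 part with 4 SEs,
 * 1 SH/SE and a single XCC:
 *
 *	uint32_t se_mask[4];
 *	uint32_t cu_mask = 0xf;		// request the first four CUs
 *	mqd_symmetrically_map_cu_mask(mm, &cu_mask, 32, se_mask, 0);
 *	// result: bit 0 set in each of se_mask[0..3] (one CU per SE)
 */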
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
		const uint32_t *cu_mask, uint32_t cu_mask_count,
		uint32_t *se_mask, uint32_t inst)
{
	struct amdgpu_cu_info *cu_info = &mm->dev->adev->gfx.cu_info;
	struct amdgpu_gfx_config *gfx_info = &mm->dev->adev->gfx.config;
	uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
	bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
	int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
	uint32_t cu_active_per_node;
	int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
	int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;

	cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;
	if (cu_mask_count > cu_active_per_node)
		cu_mask_count = cu_active_per_node;

	/* Exceeding these bounds corrupts the stack and indicates a coding error.
	 * Returning with no CUs enabled will hang the queue, which should be
	 * attention-grabbing.
	 */
	if (gfx_info->max_shader_engines > KFD_MAX_NUM_SE) {
		pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
		       gfx_info->max_shader_engines);
		return;
	}
	if (gfx_info->max_sh_per_se > KFD_MAX_NUM_SH_PER_SE) {
		pr_err("Exceeded KFD_MAX_NUM_SH_PER_SE, chip reports %d\n",
			gfx_info->max_sh_per_se);
		return;
	}

	cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
			    KFD_GC_VERSION(mm->dev) < IP_VERSION(12, 0, 0)) ? 2 : 1;

	/* Count active CUs per SH.
	 *
	 * Some CUs in an SH may be disabled. HW expects disabled CUs to be
	 * represented in the high bits of each SH's enable mask (the upper and
	 * lower 16 bits of se_mask) and will take care of the actual
	 * distribution of disabled CUs within each SH automatically.
	 * Each half of se_mask must therefore only have bits set in the range
	 * 0 to cu_per_sh[se][sh]-1.
	 *
	 * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
	 * See note on GFX11 cu_bitmap layout in gfx_v11_0_get_cu_info.
	 */
	for (se = 0; se < gfx_info->max_shader_engines; se++)
		for (sh = 0; sh < gfx_info->max_sh_per_se; sh++)
			cu_per_sh[se][sh] = hweight32(
				cu_info->bitmap[xcc_inst][se % 4][sh + (se / 4) *
				cu_bitmap_sh_mul]);

	/* Symmetrically map cu_mask to all SEs & SHs:
	 * se_mask programs up to 2 SH in the upper and lower 16 bits.
	 *
	 * Examples
	 * Assuming 1 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0
	 * cu_mask[0] bit1 -> se_mask[1] bit0
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit1
	 * ...
	 *
	 * Assuming 2 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
	 * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
	 * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
	 * ...
	 * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
	 * ...
	 *
	 * For GFX 9.4.3, the following code only looks at the subset of
	 * cu_mask corresponding to the inst parameter.
	 * If we have n XCCs under one GPU node:
	 * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
	 * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
	 * ...
	 * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
	 * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
	 *
	 * For example, if there are 6 XCCs under 1 KFD node, this code,
	 * running once per inst, will look at bits inst, inst + 6,
	 * inst + 12, and so on.
	 *
	 * First ensure all CUs are disabled, then enable user-specified CUs.
	 */
	for (i = 0; i < gfx_info->max_shader_engines; i++)
		se_mask[i] = 0;

	i = inst;
	for (cu = 0; cu < 16; cu += cu_inc) {
		for (sh = 0; sh < gfx_info->max_sh_per_se; sh++) {
			for (se = 0; se < gfx_info->max_shader_engines; se++) {
				if (cu_per_sh[se][sh] > cu) {
					if (cu_mask[i / 32] & (en_mask << (i % 32)))
						se_mask[se] |= en_mask << (cu + sh * 16);
					i += inc;
					if (i >= cu_mask_count)
						return;
				}
			}
		}
	}
}

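/* Load the HIQ MQD into its HQD through the KIQ, programming the queue's
 * doorbell offset; the trailing 0 selects hardware instance 0.
 */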
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
		     uint32_t pipe_id, uint32_t queue_id,
		     struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
					      queue_id, p->doorbell_off, 0);
}

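/* Preempt or reset a compute queue's HQD through the CP, waiting up to
 * the given timeout for the hardware to release the queue.
 */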
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
		enum kfd_preempt_type type, unsigned int timeout,
		uint32_t pipe_id, uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
						pipe_id, queue_id, 0);
}

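/* Compute MQDs are backed either by a dedicated GTT allocation or by the
 * device's GTT sub-allocator; release whichever backing was used.
 */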
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
	      struct kfd_mem_obj *mqd_mem_obj)
{
	if (mqd_mem_obj->gtt_mem) {
		amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
		kfree(mqd_mem_obj);
	} else {
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}
}

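/* Report whether the HQD at (pipe_id, queue_id) is still active for the
 * given queue address.
 */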
bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
		 uint64_t queue_address, uint32_t pipe_id,
		 uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
						pipe_id, queue_id, 0);
}

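/* Load an SDMA MQD into its hardware queue. pipe_id and queue_id are
 * unused here; the target SDMA engine and queue come from the MQD.
 */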
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
		  uint32_t pipe_id, uint32_t queue_id,
		  struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
						(uint32_t __user *)p->write_ptr,
						mms);
}

/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
		     enum kfd_preempt_type type,
		     unsigned int timeout, uint32_t pipe_id,
		     uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}

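/* pipe_id and queue_id are likewise ignored for SDMA; occupancy is
 * derived from the MQD alone.
 */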
bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
		      uint64_t queue_address, uint32_t pipe_id,
		      uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}

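/* Stride between the per-XCC HIQ MQD copies in the shared buffer. */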
uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev)
{
	return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
}

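/* Point mqd_mem_obj at the HIQ MQD copy for one XCC. Only the first
 * XCC's object carries the gtt_mem reference, so the shared buffer is
 * accounted for exactly once.
 */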
void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj,
		     uint32_t virtual_xcc_id)
{
	uint64_t offset;

	offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id;

	mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ?
			dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
}

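/* Default stride between MQDs: one fixed-size MQD per queue. */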
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
			struct queue_properties *q)
{
	return mm->mqd_size;
}
