/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>
#include "amdgpu_irq.h"
#include "amdgpu_gfx.h"

struct pci_dev;
struct amdgpu_device;

struct kfd_dev;
struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
	KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
};

struct kfd_vm_fault_info {
	uint64_t	page_addr;
	uint32_t	vmid;
	uint32_t	mc_id;
	uint32_t	status;
	bool		prot_valid;
	bool		prot_read;
	bool		prot_write;
	bool		prot_exec;
};
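
/*
 * Illustrative sketch (not part of the interface): one way a consumer might
 * log the decoded fault information above. The helper name is hypothetical,
 * and pr_debug() assumes <linux/printk.h> is reachable from this header.
 */
static inline void kfd_example_log_vm_fault(const struct kfd_vm_fault_info *info)
{
	/* page_addr identifies the faulting page; mc_id and status come from
	 * the memory controller / VM fault registers.
	 */
	pr_debug("VM fault: vmid %u page 0x%llx mc_id %u status 0x%x %s%s%s%s\n",
		 info->vmid, info->page_addr, info->mc_id, info->status,
		 info->prot_valid ? "V" : "-",
		 info->prot_read ? "R" : "-",
		 info->prot_write ? "W" : "-",
		 info->prot_exec ? "X" : "-");
}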

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};
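
/*
 * Illustrative sketch (not part of the interface): total local memory is the
 * sum of the private (not CPU-accessible) and public (CPU-accessible) parts
 * reported above. The helper name is hypothetical.
 */
static inline uint64_t kfd_example_total_local_mem(const struct kfd_local_mem_info *mem)
{
	return mem->local_mem_size_private + mem->local_mem_size_public;
}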

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy, also known as command
 * processor (CP) scheduling. In this mode the firmware schedules the user
 * mode queues as well as kernel queues such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches to the CP the configuration
 * and the list of user mode queues that are currently running.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode oversubscription of user mode queues is enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
 * the command processor registers and programs the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
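
/*
 * Illustrative sketch (not part of the interface): hypothetical helpers
 * showing how the policy values above are typically interpreted. Both HWS
 * policies rely on the firmware scheduler; only plain HWS allows user mode
 * queue oversubscription.
 */
static inline bool kfd_example_policy_uses_hws(enum kfd_sched_policy policy)
{
	return policy != KFD_SCHED_POLICY_NO_HWS;
}

static inline bool kfd_example_policy_allows_oversubscription(enum kfd_sched_policy policy)
{
	return policy == KFD_SCHED_POLICY_HWS;
}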

struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);

	/* SDMA doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 */
	uint32_t *sdma_doorbell_idx;

	/* From SOC15 onward, the range of doorbell indices that is not
	 * usable for CP queues.
	 */
	uint32_t non_cp_doorbells_start;
	uint32_t non_cp_doorbells_end;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;

	bool enable_mes;
};
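
/*
 * Illustrative sketch (not part of the interface): checking whether a given
 * MEC/pipe/queue is usable by KFD via cp_queue_bitmap above. The helper name
 * is hypothetical and the bit layout is assumed to be
 * (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue.
 */
static inline bool kfd_example_queue_available(
		const struct kgd2kfd_shared_resources *res,
		unsigned int mec, unsigned int pipe, unsigned int queue)
{
	unsigned int bit = (mec * res->num_pipe_per_mec + pipe) *
			   res->num_queue_per_pipe + queue;

	return bit < AMDGPU_MAX_QUEUES && test_bit(bit, res->cp_queue_bitmap);
}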

struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};
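
/*
 * Illustrative sketch (not part of the interface): the two arrays above are
 * paired with their element counts, so a consumer copies at most
 * num_tile_configs (or num_macro_tile_configs) entries. The helper name and
 * the caller-supplied buffer are hypothetical.
 */
static inline uint32_t kfd_example_copy_tile_configs(const struct tile_config *cfg,
						     uint32_t *buf, uint32_t buf_count)
{
	uint32_t n = cfg->num_tile_configs < buf_count ?
		     cfg->num_tile_configs : buf_count;
	uint32_t i;

	for (i = 0; i < n; i++)
		buf[i] = cfg->tile_config_ptr[i];

	return n;
}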

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/**
 * struct kfd2kgd_calls
 *
 * @program_sh_mem_settings: A function that programs the memory
 * properties such as the main aperture memory type (cached / non-cached) and
 * the secondary aperture base address, size and memory type.
 * This function is used only for no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Used only
 * for no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Used only for no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree
 * by the caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @set_scratch_backing_va: Sets the VA for the scratch backing memory of a
 * VMID. Used only for no cp scheduling mode.
 *
 * @set_vm_context_page_table_base: Programs the page table base for a VMID.
 *
 * @invalidate_tlbs: Invalidates TLBs for a specific PASID.
 *
 * @invalidate_tlbs_vmid: Invalidates TLBs for a specific VMID.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @get_cu_occupancy: Function pointer that returns to the caller the number
 * of wavefronts that are in flight for all of the queues of a process,
 * as identified by its pasid. Note that the returned value is a snapshot of
 * the current moment and does not guarantee any minimum for the number of
 * waves in flight. This function is defined only for devices of the GFX9
 * and later GFX families; it must not be called for devices of the GFX8
 * and earlier families, where it is not defined.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
	/* Register access functions */
	void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases,
			uint32_t inst);

	int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
					unsigned int vmid, uint32_t inst);

	int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id,
			uint32_t inst);

	int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm, uint32_t inst);

	int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off, uint32_t inst);

	int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);

	int (*hqd_sdma_dump)(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst);

	int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst);

	bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);

	int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
				unsigned int timeout);

	int (*wave_control_execute)(struct amdgpu_device *adev,
					uint32_t gfx_index_val,
					uint32_t sq_cmd, uint32_t inst);
	bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
					uint8_t vmid,
					uint16_t *p_pasid);

	/* No longer needed from GFXv9 onward. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
	void (*set_scratch_backing_va)(struct amdgpu_device *adev,
				uint64_t va, uint32_t vmid);

	void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t page_table_base);
	uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);

	uint32_t (*enable_debug_trap)(struct amdgpu_device *adev,
					bool restore_dbg_registers,
					uint32_t vmid);
	uint32_t (*disable_debug_trap)(struct amdgpu_device *adev,
					bool keep_trap_enabled,
					uint32_t vmid);
	int (*validate_trap_override_request)(struct amdgpu_device *adev,
					uint32_t trap_override,
					uint32_t *trap_mask_supported);
	uint32_t (*set_wave_launch_trap_override)(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint32_t trap_override,
					     uint32_t trap_mask_bits,
					     uint32_t trap_mask_request,
					     uint32_t *trap_mask_prev,
					     uint32_t kfd_dbg_trap_cntl_prev);
	uint32_t (*set_wave_launch_mode)(struct amdgpu_device *adev,
					uint8_t wave_launch_mode,
					uint32_t vmid);
	uint32_t (*set_address_watch)(struct amdgpu_device *adev,
					uint64_t watch_address,
					uint32_t watch_address_mask,
					uint32_t watch_id,
					uint32_t watch_mode,
					uint32_t debug_vmid,
					uint32_t inst);
	uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
			uint32_t watch_id);
	void (*get_iq_wait_times)(struct amdgpu_device *adev,
			uint32_t *wait_times,
			uint32_t inst);
	void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
			uint32_t wait_times,
			uint32_t grace_period,
			uint32_t *reg_offset,
			uint32_t *reg_data);
	void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
			int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
	void (*program_trap_handler_settings)(struct amdgpu_device *adev,
			uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
			uint32_t inst);
};
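
/*
 * Illustrative sketch (not part of the interface): invoking the hqd_dump hook
 * through this table and releasing the register dump, which the KGD side
 * allocates with kmalloc and the caller must kfree (see the comment above).
 * The function name and the pipe/queue/inst values are hypothetical; kfree()
 * and pr_debug() assume <linux/slab.h> and <linux/printk.h> are reachable.
 */
static inline void kfd_example_dump_hqd(struct amdgpu_device *adev,
					const struct kfd2kgd_calls *kfd2kgd)
{
	uint32_t (*dump)[2];	/* array of {register offset, value} pairs */
	uint32_t n_regs, i;

	if (kfd2kgd->hqd_dump(adev, 0 /* pipe_id */, 0 /* queue_id */,
			      &dump, &n_regs, 0 /* inst */))
		return;

	for (i = 0; i < n_regs; i++)
		pr_debug("HQD reg 0x%x = 0x%x\n", dump[i][0], dump[i][1]);

	kfree(dump);	/* ownership of the dump array is with the caller */
}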

#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */