/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
struct amdgpu_mem_stats;
/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
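/*
 * Worked example (a block_size of 9 is assumed for illustration):
 * AMDGPU_VM_PTE_COUNT() then yields 1 << 9 = 512 entries, and at the
 * 4 KiB GPU page size one page table block covers 512 * 4 KiB = 2 MiB
 * of address space.
 */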

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS	(AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
				AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
				   AMDGPU_PTE_PRT)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)
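/*
 * Illustrative sketch only: a valid, snooped, read/write system page on
 * gfx9 would combine the bits above roughly as
 *
 *	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *		AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *		AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 */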

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* How much VRAM should be reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS			13
#define AMDGPU_GFXHUB_START			0
#define AMDGPU_MMHUB0_START			8
#define AMDGPU_MMHUB1_START			12
#define AMDGPU_GFXHUB(x)			(AMDGPU_GFXHUB_START + (x))
#define AMDGPU_MMHUB0(x)			(AMDGPU_MMHUB0_START + (x))
#define AMDGPU_MMHUB1(x)			(AMDGPU_MMHUB1_START + (x))

#define AMDGPU_IS_GFXHUB(x) ((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START)
#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
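/*
 * For example, AMDGPU_GFXHUB(0) evaluates to index 0, AMDGPU_MMHUB0(1) to
 * index 9 and AMDGPU_MMHUB1(0) to index 12; AMDGPU_IS_GFXHUB() and friends
 * recover the hub type from such an index.
 */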

/* Reserve space at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_CSA_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_CSA_START(adev)	(((adev)->vm_manager.max_pfn \
						  << AMDGPU_GPU_PAGE_SHIFT)  \
						 - AMDGPU_VA_RESERVED_CSA_SIZE)
#define AMDGPU_VA_RESERVED_SEQ64_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_SEQ64_START(adev)	(AMDGPU_VA_RESERVED_CSA_START(adev) \
						 - AMDGPU_VA_RESERVED_SEQ64_SIZE)
#define AMDGPU_VA_RESERVED_TRAP_SIZE		(2ULL << 12)
#define AMDGPU_VA_RESERVED_TRAP_START(adev)	(AMDGPU_VA_RESERVED_SEQ64_START(adev) \
						 - AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM		(1ULL << 16)
#define AMDGPU_VA_RESERVED_TOP			(AMDGPU_VA_RESERVED_TRAP_SIZE + \
						 AMDGPU_VA_RESERVED_SEQ64_SIZE + \
						 AMDGPU_VA_RESERVED_CSA_SIZE)
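/*
 * Resulting top-of-VA layout, highest address first: 2 MiB CSA, then
 * 2 MiB SEQ64, then an 8 KiB trap region; AMDGPU_VA_RESERVED_TOP is the
 * sum of the three sizes.
 */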

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};
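/*
 * With all four levels in use a translation walks PDB2 -> PDB1 -> PDB0 ->
 * PTB; vm_manager.root_level (see below) selects where the walk starts
 * when fewer levels are configured.
 */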

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
	char		process_name[TASK_COMM_LEN];
	char		task_name[TASK_COMM_LEN];
	pid_t		pid;
	pid_t		tgid;
	struct kref	refcount;
};
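/*
 * Lifetime note: the refcount is taken by the lookup helpers
 * amdgpu_vm_get_task_info_pasid()/amdgpu_vm_get_task_info_vm() declared
 * below and must be dropped again with amdgpu_vm_put_task_info().
 */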

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: set to true if a page table was freed during the update
	 */
	bool table_freed;

	/**
	 * @allow_override: true for memory that is not uncached: allows MTYPE
	 * to be overridden for NUMA local memory.
	 */
	bool allow_override;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
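/*
 * A rough sketch of the expected call sequence (an assumption based on the
 * hooks above, not a contract): map_table() makes the page table BO
 * accessible for updates, prepare() fills the amdgpu_vm_update_params and
 * syncs to @resv, update() is invoked one or more times to write entries,
 * and commit() submits the work and optionally returns a fence.
 */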

struct amdgpu_vm_fault_info {
	/* fault address */
	uint64_t	addr;
	/* fault status register */
	uint32_t	status;
	/* which vmhub? gfxhub, mmhub, etc. */
	unsigned int	vmhub;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables;
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* Lock to protect vm_bo add/del/move on all lists of vm */
	spinlock_t		status_lock;

	/* Per-VM and PT BOs that need a validation */
	struct list_head	evicted;

	/* BOs for user mode queues that need a validation */
	struct list_head	evicted_user;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* BOs which were invalidated and have already been updated in the PTs */
	struct list_head	done;

	/* PT BOs scheduled to be freed and filled with zeros when the vm resv
	 * is not held
	 */
	struct list_head	pt_freed;
	struct work_struct	pt_free_work;

	/* contains the page directory */
	struct amdgpu_vm_bo_base	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last finished delayed update */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;
	atomic64_t		kfd_last_flushed_seq;

	/* How many times we had to re-generate the page tables */
	uint64_t		generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	bool			reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool					use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info *task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;

	/* Memory partition number, -1 means any partition */
	int8_t			mem_id;

	/* cached fault info */
	struct amdgpu_vm_fault_info fault_info;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;
	bool					concurrent_flush;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, used in interrupt context to look up the
	 * VM of a page fault
	 */
	struct xarray				pasids;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
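/*
 * Usage sketch for the wrappers above (illustrative; "adev", "ib", "pe",
 * "addr", "count", "incr" and "flags" are assumed to be prepared by the
 * caller):
 *
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags);
 *
 * dispatches to the set_pte_pde() hook of the IP-specific vm_pte_funcs.
 */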

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct ww_acquire_ctx *ticket,
		       int (*callback)(void *p, struct amdgpu_bo *bo),
		       void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm,
			   struct ww_acquire_ctx *ticket);
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint32_t flush_type,
				uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);

void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);

bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number, which indicates that the VM TLBs need
 * to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}
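/*
 * Usage sketch (illustrative): callers snapshot the sequence once, e.g.
 *
 *	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
 *
 * and compare it against a later read; a changed value means the VM TLBs
 * must be flushed before the updated mappings may be relied upon.
 */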

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}
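/*
 * Usage sketch (illustrative): page table manipulation brackets its
 * critical section with these helpers, e.g.
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	... update page tables ...
 *	amdgpu_vm_eviction_unlock(vm);
 *
 * so that memory reclaim cannot recurse into an MMU notifier that needs
 * the same lock.
 */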

void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
				  unsigned int pasid,
				  uint64_t addr,
				  uint32_t status,
				  unsigned int vmhub);

#endif