/*	$NetBSD: amdgpu_object.h,v 1.5 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};

/* BO virtual addresses in a VM */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* per-VM structure for page tables and BOs with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns the AMDGPU_GEM_DOMAIN_* flag corresponding to @mem_type, or 0 if
 * there is no corresponding domain.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
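
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * helper is typically fed the current TTM placement of a BO to recover its
 * GEM domain, e.g. when reporting where a buffer currently lives:
 *
 *	u32 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 *		... the buffer is currently resident in VRAM ...
 */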

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * 0 on success, or
 * -ERESTARTSYS if a wait for the buffer to become unreserved was interrupted
 * by a signal; release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
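
/*
 * Usage sketch (illustrative only): the usual pattern is to reserve a BO,
 * operate on it, and unreserve it, propagating -ERESTARTSYS so the syscall
 * can be restarted.  The surrounding caller here is hypothetical:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r != 0)
 *		return r;
 *	... access fields protected by the reservation ...
 *	amdgpu_bo_unreserve(bo);
 */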

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
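
/*
 * Usage sketch (illustrative only): the returned offset is the fake offset
 * that userspace passes as the last argument of mmap(2) on the DRM device
 * file to map the buffer:
 *
 *	u64 offset = amdgpu_bo_mmap_offset(bo);
 *	... copy offset back to userspace for use with mmap(2) ...
 */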

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo:	BO to check
 *
 * Returns true if at least part of the BO currently resides in CPU-visible
 * VRAM, false otherwise.
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo:	BO to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
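
/*
 * Usage sketch (illustrative only; the values are arbitrary): a caller fills
 * in struct amdgpu_bo_param and hands it to amdgpu_bo_create():
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.flags = 0;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
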
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
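
/*
 * Usage sketch (illustrative only): amdgpu_bo_create_kernel() allocates,
 * pins, and optionally maps a kernel-owned BO; amdgpu_bo_free_kernel()
 * undoes all of that and clears the pointers:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
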
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
#ifndef __NetBSD__
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
#endif
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
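
/*
 * Usage sketch (illustrative only; sizes are arbitrary): a sub-allocation
 * manager is initialized once, hands out small amdgpu_sa_bo pieces, and
 * each piece is released against a fence that marks when the GPU is done
 * with it:
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &sa_manager, 64 * 1024,
 *				      16, AMDGPU_GEM_DOMAIN_GTT);
 *	...
 *	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 256, 16);
 *	...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
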
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif