// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from
 * the managers to struct sg_table. Basically, this provides the mapping from
 * i915 GEM regions to TTM memory types and resource managers.
 */
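
/*
 * Illustrative sketch only (not compiled): the rough call order the helpers
 * below are assumed to be used in. "i915" and "mem" stand for a struct
 * drm_i915_private and a struct intel_memory_region set up elsewhere, and
 * intel_region_ttm_resource_alloc() is only available to selftests.
 *
 *	ret = intel_region_ttm_device_init(i915);
 *	ret = intel_region_ttm_init(mem);
 *	res = intel_region_ttm_resource_alloc(mem, offset, size, flags);
 *	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
 *	...
 *	intel_region_ttm_resource_free(mem, res);
 *	ret = intel_region_ttm_fini(mem);
 *	intel_region_ttm_device_fini(i915);
 */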

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *drm = &dev_priv->drm;

#ifdef notyet
	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, drm->anon_inode->i_mapping,
			       drm->vma_offset_manager, false, false);
#else
	return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
			       drm->dev, /*drm->anon_inode->i_mapping*/NULL,
			       drm->vma_offset_manager, false, false);
#endif
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
	ttm_device_fini(&dev_priv->bdev);
}
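
/*
 * Sketch of the assumed driver load/unload pairing (not taken from this
 * file): intel_region_ttm_device_init() is expected to be called once per
 * device before any region is initialized, and matched by
 * intel_region_ttm_device_fini() on the way out.
 *
 *	ret = intel_region_ttm_device_init(i915);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_region_ttm_device_fini(i915);
 */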

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use, should we decide to implement this.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
	int type;

	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
		   mem->type != INTEL_MEMORY_MOCK &&
		   mem->type != INTEL_MEMORY_SYSTEM);

	if (mem->type == INTEL_MEMORY_SYSTEM)
		return TTM_PL_SYSTEM;

	type = mem->instance + TTM_PL_PRIV;
	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

	return type;
}
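
/*
 * With the scheme above, a system memory region maps to TTM_PL_SYSTEM, while
 * LMEM/MOCK instance 0 maps to TTM_PL_PRIV, instance 1 to TTM_PL_PRIV + 1,
 * and so on. A minimal, illustrative sketch of a caller building a TTM
 * placement from the helper (the surrounding placement setup is assumed):
 *
 *	struct ttm_place place = {
 *		.mem_type = intel_region_to_ttm_type(mem),
 *	};
 */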

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's an LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
	struct ttm_device *bdev = &mem->i915->bdev;
	int mem_type = intel_region_to_ttm_type(mem);
	int ret;

	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
				      resource_size(&mem->region),
				      mem->io_size,
				      mem->min_page_size, PAGE_SIZE);
	if (ret)
		return ret;

	mem->region_private = ttm_manager_type(bdev, mem_type);

	return 0;
}
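
/*
 * Sketch of the assumed call site (region creation path, not part of this
 * file): the TTM resource manager is set up once after the generic region
 * fields are initialized, and must later be torn down with
 * intel_region_ttm_fini().
 *
 *	ret = intel_region_ttm_init(mem);
 *	if (ret)
 *		return ret;
 */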

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
	struct ttm_resource_manager *man = mem->region_private;
	int ret = -EBUSY;
	int count;

	/*
	 * Put the region's move fences. This releases requests that
	 * may hold on to contexts and vms that may hold on to buffer
	 * objects placed in this region.
	 */
	if (man)
		ttm_resource_manager_cleanup(man);

	/* Flush objects from region. */
	for (count = 0; count < 10; ++count) {
		i915_gem_flush_free_objects(mem->i915);

		mutex_lock(&mem->objects.lock);
		if (list_empty(&mem->objects.list))
			ret = 0;
		mutex_unlock(&mem->objects.lock);
		if (!ret)
			break;

		drm_msleep(20);
		drain_workqueue(mem->i915->bdev.wq);
	}

	/* If we leaked objects, don't free the region; that would be a use-after-free. */
	if (ret || !man)
		return ret;

	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
				      intel_region_to_ttm_type(mem));
	GEM_WARN_ON(ret);
	mem->region_private = NULL;

	return ret;
}
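
/*
 * Sketch of the assumed teardown path (not part of this file): the return
 * value must be checked, since a non-zero value means objects leaked and the
 * region, still referenced by them, must not be released.
 *
 *	if (!intel_region_ttm_fini(mem))
 *		...release the region's own bookkeeping...
 */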

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The GEM backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A refcounted sg-table on success, an error pointer on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
				  struct ttm_resource *res,
				  u32 page_alignment)
{
	if (mem->is_range_manager) {
		struct ttm_range_mgr_node *range_node =
			to_ttm_range_mgr_node(res);

		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
					      mem->region.start,
					      page_alignment);
	} else {
		return i915_rsgt_from_buddy_resource(res, mem->region.start,
						     page_alignment);
	}
}
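
/*
 * Minimal usage sketch, assuming @res was allocated from this region's
 * resource manager and that PAGE_SIZE alignment is sufficient. The put
 * helper comes from i915_scatterlist.h.
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, PAGE_SIZE);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	...use rsgt->table (a struct sg_table)...
 *	i915_refct_sgt_put(rsgt);
 */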

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @offset: BO offset
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque node can be converted to an sg_table using
 * intel_region_ttm_resource_to_rsgt(), and can be freed using
 * intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				unsigned int flags)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_place place = {};
	struct ttm_buffer_object mock_bo = {};
	struct ttm_resource *res;
	int ret;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
			ret = -E2BIG;
			goto out;
		}
		place.fpfn = offset >> PAGE_SHIFT;
		if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
			ret = -E2BIG;
			goto out;
		}
		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
	} else if (mem->io_size && mem->io_size < mem->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place.fpfn = 0;
			if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
				ret = -E2BIG;
				goto out;
			}
			place.lpfn = mem->io_size >> PAGE_SHIFT;
		}
	}

	mock_bo.base.size = size;
	mock_bo.bdev = &mem->i915->bdev;

	ret = man->func->alloc(man, &mock_bo, &place, &res);

out:
	if (ret == -ENOSPC)
		ret = -ENXIO;
	if (!ret)
		res->bo = NULL; /* Rather blow up than risk a use-after-free */
	return ret ? ERR_PTR(ret) : res;
}

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
				    struct ttm_resource *res)
{
	struct ttm_resource_manager *man = mem->region_private;
	struct ttm_buffer_object mock_bo = {};

	mock_bo.base.size = res->size;
	mock_bo.bdev = &mem->i915->bdev;
	res->bo = &mock_bo;

	man->func->free(man, res);
}
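
/*
 * Selftest-style sketch (illustrative only; the alloc helper requires
 * CONFIG_DRM_I915_SELFTEST): pairing the resource alloc and free helpers
 * above. The size and flags shown are arbitrary assumptions.
 *
 *	struct ttm_resource *res;
 *
 *	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
 *					      SZ_4M, 0);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...exercise the allocation...
 *	intel_region_ttm_resource_free(mem, res);
 */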