1// SPDX-License-Identifier: MIT
2/*
 * Copyright © 2021 Intel Corporation
4 */
5
6#include <drm/ttm/ttm_placement.h>
7#include <drm/ttm/ttm_tt.h>
8
9#include "i915_drv.h"
10#include "intel_memory_region.h"
11#include "intel_region_ttm.h"
12
13#include "gem/i915_gem_region.h"
14#include "gem/i915_gem_ttm.h"
15#include "gem/i915_gem_ttm_move.h"
16#include "gem/i915_gem_ttm_pm.h"
17
18/**
19 * i915_ttm_backup_free - Free any backup attached to this object
20 * @obj: The object whose backup is to be freed.
21 */
22void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
23{
24	if (obj->ttm.backup) {
25		i915_gem_object_put(obj->ttm.backup);
26		obj->ttm.backup = NULL;
27	}
28}
29
/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup / restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, backup also pinned objects.
 *
 * Carries per-pass options down to the process_obj callbacks
 * (i915_ttm_backup() / i915_ttm_restore()), which recover it from @base
 * via container_of().
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};
41
/*
 * i915_ttm_backup - Process one object during a region backup pass.
 * @apply: The apply-to-region state; embedded in a struct
 *         i915_gem_ttm_pm_apply carrying the pass options.
 * @obj: The object to back up or evict.
 *
 * Evictable objects are simply moved to system memory. Pinned objects
 * are, when @backup_pinned is set, copied into a freshly allocated smem
 * backup object which is attached to @obj for a later restore pass.
 *
 * Return: Zero on success (including when the object is skipped),
 * negative error code on failure.
 */
static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	unsigned int flags;
	int err = 0;

	/* Nothing to do if not CPU-mapped as iomem, or already backed up. */
	if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
		return 0;

	/* Evictable objects can simply be moved to system memory. */
	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	/*
	 * Skip pinned objects unless this pass backs them up; PM_EARLY
	 * objects are deferred to the late (no-gpu) pass when the gpu is
	 * allowed here.
	 */
	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	/* Volatile objects need no backup; their content may be dropped. */
	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	/*
	 * It seems that we might have some framebuffers still pinned at this
	 * stage, but for such objects we might also need to deal with the CCS
	 * aux state. Make sure we force the save/restore of the CCS state,
	 * otherwise we might observe display corruption, when returning from
	 * suspend.
	 */
	flags = 0;
	if (i915_gem_object_needs_ccs_pages(obj)) {
		WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
		WARN_ON_ONCE(!pm_apply->allow_gpu);

		flags = I915_BO_ALLOC_CCS_AUX;
	}
	/* Allocate the smem backup object that will hold the contents. */
	backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					       obj->base.size, 0, flags);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	/* Make sure the backup has pages before copying into it. */
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	if (err) {
		drm_err(&i915->drm,
			"Unable to copy from device to system memory, err:%pe\n",
			ERR_PTR(err));
		goto out_no_populate;
	}
	/* Wait for the (possibly gpu) copy before publishing the backup. */
	ttm_bo_wait_ctx(backup_bo, &ctx);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}
116
/*
 * i915_ttm_recover - Per-object callback for i915_ttm_recover_region():
 * drop any backup left behind by a partially-performed backup pass.
 */
static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}
123
124/**
125 * i915_ttm_recover_region - Free the backup of all objects of a region
126 * @mr: The memory region
127 *
128 * Checks all objects of a region if there is backup attached and if so
129 * frees that backup. Typically this is called to recover after a partially
130 * performed backup.
131 */
132void i915_ttm_recover_region(struct intel_memory_region *mr)
133{
134	static const struct i915_gem_apply_to_region_ops recover_ops = {
135		.process_obj = i915_ttm_recover,
136	};
137	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
138	int ret;
139
140	ret = i915_gem_process_region(mr, &apply);
141	GEM_WARN_ON(ret);
142}
143
144/**
145 * i915_ttm_backup_region - Back up all objects of a region to smem.
146 * @mr: The memory region
147 * @flags: TTM backup flags
148 *
149 * Loops over all objects of a region and either evicts them if they are
150 * evictable or backs them up using a backup object if they are pinned.
151 *
152 * Return: Zero on success. Negative error code on error.
153 */
154int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
155{
156	static const struct i915_gem_apply_to_region_ops backup_ops = {
157		.process_obj = i915_ttm_backup,
158	};
159	struct i915_gem_ttm_pm_apply pm_apply = {
160		.base = {.ops = &backup_ops},
161		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
162		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
163	};
164
165	return i915_gem_process_region(mr, &pm_apply.base);
166}
167
/*
 * i915_ttm_restore - Process one object during a region restore pass.
 * @apply: The apply-to-region state; embedded in a struct
 *         i915_gem_ttm_pm_apply carrying the pass options.
 * @obj: The object whose backup, if any, is to be restored.
 *
 * Copies the contents of the attached backup object back into @obj and
 * then drops the backup. Objects without a backup are skipped, as are
 * non-PM_EARLY objects when the gpu is not allowed (they are handled by
 * a later pass).
 *
 * Return: Zero on success. Negative error code on error.
 */
static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	/*
	 * NOTE(review): computed before the NULL check below — pointer
	 * arithmetic only, no dereference until after !backup is ruled out.
	 */
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	if (!backup_bo->resource)
		err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
	if (!err)
		err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		/* Copy failure is unexpected here; warn but keep going. */
		GEM_WARN_ON(err);
		ttm_bo_wait_ctx(backup_bo, &ctx);

		/* Detach the backup; it is put below once unlocked. */
		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	/* On error the backup stays attached for a later recover/retry. */
	if (!err)
		i915_gem_object_put(backup);

	return err;
}
210
211/**
212 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
213 * @mr: The memory region
214 * @flags: TTM backup flags
215 *
216 * Loops over all objects of a region and if they are backed-up, restores
217 * them from smem.
218 *
219 * Return: Zero on success. Negative error code on error.
220 */
221int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
222{
223	static const struct i915_gem_apply_to_region_ops restore_ops = {
224		.process_obj = i915_ttm_restore,
225	};
226	struct i915_gem_ttm_pm_apply pm_apply = {
227		.base = {.ops = &restore_ops},
228		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
229	};
230
231	return i915_gem_process_region(mr, &pm_apply.base);
232}
233