117658Sjulian// SPDX-License-Identifier: MIT
217658Sjulian/*
 * Copyright © 2021 Intel Corporation
417658Sjulian */
517658Sjulian
617658Sjulian#include <drm/ttm/ttm_placement.h>
717658Sjulian#include <drm/ttm/ttm_tt.h>
817658Sjulian
917658Sjulian#include "i915_drv.h"
1017658Sjulian#include "intel_memory_region.h"
1117658Sjulian#include "intel_region_ttm.h"
1217658Sjulian
1317658Sjulian#include "gem/i915_gem_region.h"
1417658Sjulian#include "gem/i915_gem_ttm.h"
1517658Sjulian#include "gem/i915_gem_ttm_move.h"
1617658Sjulian#include "gem/i915_gem_ttm_pm.h"
1717658Sjulian
1817658Sjulian/**
1917658Sjulian * i915_ttm_backup_free - Free any backup attached to this object
2017658Sjulian * @obj: The object whose backup is to be freed.
2117658Sjulian */
2217658Sjulianvoid i915_ttm_backup_free(struct drm_i915_gem_object *obj)
2317658Sjulian{
2417658Sjulian	if (obj->ttm.backup) {
2517658Sjulian		i915_gem_object_put(obj->ttm.backup);
2617658Sjulian		obj->ttm.backup = NULL;
2717658Sjulian	}
2817658Sjulian}
2917658Sjulian
/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup/restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, backup also pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};
4133445Seivind
/*
 * i915_ttm_backup - Back up a single object to system memory, or evict it.
 *
 * Apply-to-region callback used by i915_ttm_backup_region(). Objects whose
 * content does not live in iomem, or that already carry a backup, are left
 * alone. Evictable objects are simply moved to system memory; pinned
 * objects (when @backup_pinned is set) are copied into a freshly allocated
 * smem backup object stored in obj->ttm.backup.
 *
 * Return: Zero on success, negative error code on failure.
 */
static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	unsigned int flags;
	int err = 0;

	/* Nothing to do if not in iomem, or if a backup already exists. */
	if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
		return 0;

	/* Evictable objects can just be moved to system memory. */
	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	/* Pinned objects are only backed up when explicitly requested. */
	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	/* Volatile content need not survive suspend; skip it. */
	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	/*
	 * It seems that we might have some framebuffers still pinned at this
	 * stage, but for such objects we might also need to deal with the CCS
	 * aux state. Make sure we force the save/restore of the CCS state,
	 * otherwise we might observe display corruption, when returning from
	 * suspend.
	 */
	flags = 0;
	if (i915_gem_object_needs_ccs_pages(obj)) {
		WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
		WARN_ON_ONCE(!pm_apply->allow_gpu);

		flags = I915_BO_ALLOC_CCS_AUX;
	}
	/* Allocate the smem backup object that will hold the copy. */
	backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					       obj->base.size, 0, flags);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	if (err) {
		drm_err(&i915->drm,
			"Unable to copy from device to system memory, err:%pe\n",
			ERR_PTR(err));
		goto out_no_populate;
	}
	/* Make sure the copy has completed before publishing the backup. */
	ttm_bo_wait_ctx(backup_bo, &ctx);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}
11628000Sjulian
/* Apply-to-region callback: drop any backup held by @obj. Never fails. */
static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);

	return 0;
}
12317658Sjulian
12493496Sphk/**
12593496Sphk * i915_ttm_recover_region - Free the backup of all objects of a region
12667093Sps * @mr: The memory region
127131927Smarcel *
128131927Smarcel * Checks all objects of a region if there is backup attached and if so
129131927Smarcel * frees that backup. Typically this is called to recover after a partially
130131927Smarcel * performed backup.
13165395Speter */
13265395Spetervoid i915_ttm_recover_region(struct intel_memory_region *mr)
13365395Speter{
13465395Speter	static const struct i915_gem_apply_to_region_ops recover_ops = {
13565395Speter		.process_obj = i915_ttm_recover,
13617658Sjulian	};
13750107Smsmith	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
138110859Salfred	int ret;
13950107Smsmith
14050107Smsmith	ret = i915_gem_process_region(mr, &apply);
141110859Salfred	GEM_WARN_ON(ret);
142110859Salfred}
143110859Salfred
144110859Salfred/**
145110859Salfred * i915_ttm_backup_region - Back up all objects of a region to smem.
146110859Salfred * @mr: The memory region
147110859Salfred * @flags: TTM backup flags
148110859Salfred *
149110859Salfred * Loops over all objects of a region and either evicts them if they are
15050107Smsmith * evictable or backs them up using a backup object if they are pinned.
15148868Sphk *
15250107Smsmith * Return: Zero on success. Negative error code on error.
15350107Smsmith */
15417658Sjulianint i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
15517658Sjulian{
15682749Sdillon	static const struct i915_gem_apply_to_region_ops backup_ops = {
15782749Sdillon		.process_obj = i915_ttm_backup,
15817658Sjulian	};
15982749Sdillon	struct i915_gem_ttm_pm_apply pm_apply = {
16017658Sjulian		.base = {.ops = &backup_ops},
16183366Sjulian		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
16217658Sjulian		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
16317658Sjulian	};
16417658Sjulian
165106024Srwatson	return i915_gem_process_region(mr, &pm_apply.base);
166106024Srwatson}
167106024Srwatson
168106024Srwatsonstatic int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
169106024Srwatson			    struct drm_i915_gem_object *obj)
170106024Srwatson{
171106024Srwatson	struct i915_gem_ttm_pm_apply *pm_apply =
172106024Srwatson		container_of(apply, typeof(*pm_apply), base);
17382749Sdillon	struct drm_i915_gem_object *backup = obj->ttm.backup;
174106024Srwatson	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
175106024Srwatson	struct ttm_operation_ctx ctx = {};
17682749Sdillon	int err;
17717658Sjulian
17817658Sjulian	if (!backup)
17917658Sjulian		return 0;
18017658Sjulian
18117658Sjulian	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
18265268Smsmith		return 0;
18365268Smsmith
18417658Sjulian	err = i915_gem_object_lock(backup, apply->ww);
18565268Smsmith	if (err)
18617658Sjulian		return err;
187110859Salfred
18865268Smsmith	/* Content may have been swapped. */
189110859Salfred	if (!backup_bo->resource)
19017658Sjulian		err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
19117658Sjulian	if (!err)
19273913Sjhb		err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
19317658Sjulian	if (!err) {
19473913Sjhb		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
19517658Sjulian					    false);
19617658Sjulian		GEM_WARN_ON(err);
19717658Sjulian		ttm_bo_wait_ctx(backup_bo, &ctx);
19817658Sjulian
19917658Sjulian		obj->ttm.backup = NULL;
20017658Sjulian		err = 0;
20117658Sjulian	}
20217658Sjulian
20354233Sphk	i915_gem_ww_unlock_single(backup);
20465395Speter
20554233Sphk	if (!err)
20654233Sphk		i915_gem_object_put(backup);
20754233Sphk
20854233Sphk	return err;
20954233Sphk}
21054233Sphk
21154233Sphk/**
21254233Sphk * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
21365764Sjhb * @mr: The memory region
21454233Sphk * @flags: TTM backup flags
21554233Sphk *
21654233Sphk * Loops over all objects of a region and if they are backed-up, restores
21754233Sphk * them from smem.
21865764Sjhb *
21954233Sphk * Return: Zero on success. Negative error code on error.
22054233Sphk */
22154233Sphkint i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
22254233Sphk{
22365764Sjhb	static const struct i915_gem_apply_to_region_ops restore_ops = {
22454233Sphk		.process_obj = i915_ttm_restore,
22554233Sphk	};
22654233Sphk	struct i915_gem_ttm_pm_apply pm_apply = {
22765764Sjhb		.base = {.ops = &restore_ops},
22854233Sphk		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
22954233Sphk	};
23094169Sphk
23194169Sphk	return i915_gem_process_region(mr, &pm_apply.base);
23294169Sphk}
233110859Salfred